author    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>  2021-04-08 21:21:09 +0530
committer Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>  2021-04-08 21:21:09 +0530
commit    9e656b70b69d7c1c8d9dd69018354823b2f55658 (patch)
tree      e3f5edcf4c88e4d3549c7ee93af2ea94e00f7806
parent    f6a928ab4a2969dc4808f218eb92eb58601634ac (diff)
parent    d91d6955f045dc8d94306d7b5ef7ca6604943c72 (diff)
Merge branch 'tracking-qcomlt-sdx55-drivers' into HEAD
 Documentation/devicetree/bindings/mtd/nand-controller.yaml |    7
 Documentation/devicetree/bindings/mtd/qcom,nandc.yaml       |  196
 Documentation/devicetree/bindings/mtd/qcom_nandc.txt        |  142
 arch/arm/kernel/asm-offsets.c                                |    3
 arch/arm/kernel/smccc-call.S                                 |   11
 drivers/mtd/mtdcore.c                                        |    3
 drivers/mtd/nand/raw/qcom_nandc.c                            |  191
 drivers/pci/controller/dwc/Kconfig                           |   11
 drivers/pci/controller/dwc/Makefile                          |    1
 drivers/pci/controller/dwc/pcie-designware-ep.c              |    2
 drivers/pci/controller/dwc/pcie-designware-host.c            |    1
 drivers/pci/controller/dwc/pcie-designware.c                 |   11
 drivers/pci/controller/dwc/pcie-designware.h                 |    1
 drivers/pci/controller/dwc/pcie-qcom-ep.c                    | 1250
 drivers/phy/qualcomm/phy-qcom-qmp.c                          |  131
 drivers/phy/qualcomm/phy-qcom-qmp.h                          |   64
 drivers/remoteproc/qcom_q6v5_pas.c                           |   17
 17 files changed, 1854 insertions(+), 188 deletions(-)
diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
index d0e422f4b3e0..15a674bedca3 100644
--- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml
+++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
@@ -143,6 +143,13 @@ patternProperties:
Ready/Busy pins. Active state refers to the NAND ready state and
should be set to GPIOD_ACTIVE_HIGH unless the signal is inverted.
+ secure-regions:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description:
+ Regions in the NAND chip which are protected using a secure element
+ like Trustzone. This property contains the start address and size of
+ the secure regions present.
+
required:
- reg
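
The new secure-regions property above is a flat uint32-matrix of <start size> pairs, one pair per protected range. As a rough, hypothetical sketch (not part of this patch; the helper name and the array layout are illustrative only), a controller driver that has read those pairs into a u32 array can reject I/O falling inside a protected range as follows -- the set_address() change to drivers/mtd/nand/raw/qcom_nandc.c later in this merge performs essentially the same check:

#include <linux/types.h>
#include <linux/errno.h>

/*
 * Sketch only: @regions holds nr_pairs * 2 values laid out as
 * <start size> pairs taken from the "secure-regions" DT property.
 * Returns -EIO when @offs falls inside a range owned by the secure world.
 */
static int check_secure_regions(const u32 *regions, int nr_pairs, u32 offs)
{
	int i;

	for (i = 0; i < nr_pairs; i++) {
		u32 start = regions[2 * i];
		u32 size = regions[2 * i + 1];

		if (offs >= start && offs <= start + size)
			return -EIO;
	}

	return 0;
}
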
diff --git a/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml b/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
new file mode 100644
index 000000000000..84ad7ff30121
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
@@ -0,0 +1,196 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/qcom,nandc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm NAND controller
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq806x-nand
+ - qcom,ipq4019-nand
+ - qcom,ipq6018-nand
+ - qcom,ipq8074-nand
+ - qcom,sdx55-nand
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Core Clock
+ - description: Always ON Clock
+
+ clock-names:
+ items:
+ - const: core
+ - const: aon
+
+ "#address-cells": true
+ "#size-cells": true
+
+patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ properties:
+ nand-bus-width:
+ const: 8
+
+ nand-ecc-strength:
+ enum: [1, 4, 8]
+
+ nand-ecc-step-size:
+ enum:
+ - 512
+
+allOf:
+ - $ref: "nand-controller.yaml#"
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,ipq806x-nand
+ then:
+ properties:
+ dmas:
+ items:
+ - description: rxtx DMA channel
+
+ dma-names:
+ items:
+ - const: rxtx
+
+ qcom,cmd-crci:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Must contain the ADM command type CRCI block instance number
+ specified for the NAND controller on the given platform
+
+ qcom,data-crci:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Must contain the ADM data type CRCI block instance number
+ specified for the NAND controller on the given platform
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq4019-nand
+ - qcom,ipq6018-nand
+ - qcom,ipq8074-nand
+ - qcom,sdx55-nand
+
+ then:
+ properties:
+ dmas:
+ items:
+ - description: tx DMA channel
+ - description: rx DMA channel
+ - description: cmd DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+ - const: cmd
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+ nand-controller@1ac00000 {
+ compatible = "qcom,ipq806x-nand";
+ reg = <0x1ac00000 0x800>;
+
+ clocks = <&gcc EBI2_CLK>,
+ <&gcc EBI2_AON_CLK>;
+ clock-names = "core", "aon";
+
+ dmas = <&adm_dma 3>;
+ dma-names = "rxtx";
+ qcom,cmd-crci = <15>;
+ qcom,data-crci = <3>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+
+ nand-ecc-strength = <4>;
+ nand-bus-width = <8>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot-nand";
+ reg = <0 0x58a0000>;
+ };
+
+ partition@58a0000 {
+ label = "fs-nand";
+ reg = <0x58a0000 0x4000000>;
+ };
+ };
+ };
+ };
+
+ #include <dt-bindings/clock/qcom,gcc-ipq4019.h>
+ nand-controller@79b0000 {
+ compatible = "qcom,ipq4019-nand";
+ reg = <0x79b0000 0x1000>;
+
+ clocks = <&gcc GCC_QPIC_CLK>,
+ <&gcc GCC_QPIC_AHB_CLK>;
+ clock-names = "core", "aon";
+
+ dmas = <&qpicbam 0>,
+ <&qpicbam 1>,
+ <&qpicbam 2>;
+ dma-names = "tx", "rx", "cmd";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ nand-ecc-strength = <4>;
+ nand-bus-width = <8>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot-nand";
+ reg = <0 0x58a0000>;
+ };
+
+ partition@58a0000 {
+ label = "fs-nand";
+ reg = <0x58a0000 0x4000000>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
deleted file mode 100644
index 5647913d8837..000000000000
--- a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
+++ /dev/null
@@ -1,142 +0,0 @@
-* Qualcomm NAND controller
-
-Required properties:
-- compatible: must be one of the following:
- * "qcom,ipq806x-nand" - for EBI2 NAND controller being used in IPQ806x
- SoC and it uses ADM DMA
- * "qcom,ipq4019-nand" - for QPIC NAND controller v1.4.0 being used in
- IPQ4019 SoC and it uses BAM DMA
- * "qcom,ipq6018-nand" - for QPIC NAND controller v1.5.0 being used in
- IPQ6018 SoC and it uses BAM DMA
- * "qcom,ipq8074-nand" - for QPIC NAND controller v1.5.0 being used in
- IPQ8074 SoC and it uses BAM DMA
- * "qcom,sdx55-nand" - for QPIC NAND controller v2.0.0 being used in
- SDX55 SoC and it uses BAM DMA
-
-- reg: MMIO address range
-- clocks: must contain core clock and always on clock
-- clock-names: must contain "core" for the core clock and "aon" for the
- always on clock
-
-EBI2 specific properties:
-- dmas: DMA specifier, consisting of a phandle to the ADM DMA
- controller node and the channel number to be used for
- NAND. Refer to dma.txt and qcom_adm.txt for more details
-- dma-names: must be "rxtx"
-- qcom,cmd-crci: must contain the ADM command type CRCI block instance
- number specified for the NAND controller on the given
- platform
-- qcom,data-crci: must contain the ADM data type CRCI block instance
- number specified for the NAND controller on the given
- platform
-
-QPIC specific properties:
-- dmas: DMA specifier, consisting of a phandle to the BAM DMA
- and the channel number to be used for NAND. Refer to
- dma.txt, qcom_bam_dma.txt for more details
-- dma-names: must contain all 3 channel names : "tx", "rx", "cmd"
-- #address-cells: <1> - subnodes give the chip-select number
-- #size-cells: <0>
-
-* NAND chip-select
-
-Each controller may contain one or more subnodes to represent enabled
-chip-selects which (may) contain NAND flash chips. Their properties are as
-follows.
-
-Required properties:
-- reg: a single integer representing the chip-select
- number (e.g., 0, 1, 2, etc.)
-- #address-cells: see partition.txt
-- #size-cells: see partition.txt
-
-Optional properties:
-- nand-bus-width: see nand-controller.yaml
-- nand-ecc-strength: see nand-controller.yaml. If not specified, then ECC strength will
- be used according to chip requirement and available
- OOB size.
-
-Each nandcs device node may optionally contain a 'partitions' sub-node, which
-further contains sub-nodes describing the flash partition mapping. See
-partition.txt for more detail.
-
-Example:
-
-nand-controller@1ac00000 {
- compatible = "qcom,ipq806x-nand";
- reg = <0x1ac00000 0x800>;
-
- clocks = <&gcc EBI2_CLK>,
- <&gcc EBI2_AON_CLK>;
- clock-names = "core", "aon";
-
- dmas = <&adm_dma 3>;
- dma-names = "rxtx";
- qcom,cmd-crci = <15>;
- qcom,data-crci = <3>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- nand@0 {
- reg = <0>;
-
- nand-ecc-strength = <4>;
- nand-bus-width = <8>;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "boot-nand";
- reg = <0 0x58a0000>;
- };
-
- partition@58a0000 {
- label = "fs-nand";
- reg = <0x58a0000 0x4000000>;
- };
- };
- };
-};
-
-nand-controller@79b0000 {
- compatible = "qcom,ipq4019-nand";
- reg = <0x79b0000 0x1000>;
-
- clocks = <&gcc GCC_QPIC_CLK>,
- <&gcc GCC_QPIC_AHB_CLK>;
- clock-names = "core", "aon";
-
- dmas = <&qpicbam 0>,
- <&qpicbam 1>,
- <&qpicbam 2>;
- dma-names = "tx", "rx", "cmd";
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- nand@0 {
- reg = <0>;
- nand-ecc-strength = <4>;
- nand-bus-width = <8>;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "boot-nand";
- reg = <0 0x58a0000>;
- };
-
- partition@58a0000 {
- label = "fs-nand";
- reg = <0x58a0000 0x4000000>;
- };
- };
- };
-};
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index be8050b0c3df..70993af22d80 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -24,6 +24,7 @@
#include <asm/vdso_datapage.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
#include "signal.h"
/*
@@ -148,6 +149,8 @@ int main(void)
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 00664c78faca..931df62a7831 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -3,7 +3,9 @@
* Copyright (c) 2015, Linaro Limited
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
+#include <asm/asm-offsets.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
@@ -27,7 +29,14 @@ UNWIND( .fnstart)
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
- pop {r4-r7}
+ ldr r4, [sp, #36]
+ cmp r4, #0
+ beq 1f // No quirk structure
+ ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
+ cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6
+ bne 1f // No quirk present
+ str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
+1: pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr
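
For reference, a hedged sketch of the caller side of the quirk handling added above (this code is not part of the patch; it uses the existing arm_smccc_smc_quirk() helper and struct arm_smccc_quirk from include/linux/arm-smccc.h, and the function shown is made up for illustration). The quirk pointer is the tenth argument of __arm_smccc_smc(): it lands at [sp, #36] once {r4-r7} have been pushed, and when its id is ARM_SMCCC_QUIRK_QCOM_A6 the a6 value left by the firmware (still in r6 after the SMC) is stored back into quirk->state:

#include <linux/arm-smccc.h>

/*
 * Sketch: issue an SMC and capture the a6 value that Qualcomm firmware
 * returns for session handling. fn_id is whatever SMC function the
 * caller needs to invoke.
 */
static unsigned long example_qcom_smc(unsigned long fn_id)
{
	struct arm_smccc_res res;
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

	arm_smccc_smc_quirk(fn_id, 0, 0, 0, 0, 0, 0, 0, &res, &quirk);

	/* quirk.state.a6 now holds the a6 register handed back by firmware */
	return quirk.state.a6;
}
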
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 2d6423d89a17..d97ddc65b5d4 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
/* Prefer parsed partitions over driver-provided fallback */
ret = parse_mtd_partitions(mtd, types, parser_data);
+ if (ret == -EPROBE_DEFER)
+ goto out;
+
if (ret > 0)
ret = 0;
else if (nr_parts)
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index fd4c318b520f..7129344b704b 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -48,6 +48,10 @@
#define NAND_READ_LOCATION_1 0xf24
#define NAND_READ_LOCATION_2 0xf28
#define NAND_READ_LOCATION_3 0xf2c
+#define NAND_READ_LOCATION_LAST_CW_0 0xf40
+#define NAND_READ_LOCATION_LAST_CW_1 0xf44
+#define NAND_READ_LOCATION_LAST_CW_2 0xf48
+#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE 0xdead
@@ -187,6 +191,12 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
((size) << READ_LOCATION_SIZE) | \
((is_last) << READ_LOCATION_LAST))
+#define nandc_set_read_loc_last(nandc, reg, offset, size, is_last) \
+nandc_set_reg(nandc, NAND_READ_LOCATION_LAST_CW_##reg, \
+ ((offset) << READ_LOCATION_OFFSET) | \
+ ((size) << READ_LOCATION_SIZE) | \
+ ((is_last) << READ_LOCATION_LAST))
+
/*
* Returns the actual register address for all NAND_DEV_ registers
* (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
@@ -316,6 +326,10 @@ struct nandc_regs {
__le32 read_location1;
__le32 read_location2;
__le32 read_location3;
+ __le32 read_location_last0;
+ __le32 read_location_last1;
+ __le32 read_location_last2;
+ __le32 read_location_last3;
__le32 erased_cw_detect_cfg_clr;
__le32 erased_cw_detect_cfg_set;
@@ -431,6 +445,11 @@ struct qcom_nand_controller {
* @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
* ecc/non-ecc mode for the current nand flash
* device
+ *
+ * @sec_regions: Array representing the secure regions in the
+ * NAND chip
+ *
+ * @nr_sec_regions: Number of secure regions in the NAND chip
*/
struct qcom_nand_host {
struct nand_chip chip;
@@ -453,6 +472,9 @@ struct qcom_nand_host {
u32 ecc_bch_cfg;
u32 clrflashstatus;
u32 clrreadstatus;
+
+ u32 *sec_regions;
+ u8 nr_sec_regions;
};
/*
@@ -644,6 +666,14 @@ static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
return &regs->read_location2;
case NAND_READ_LOCATION_3:
return &regs->read_location3;
+ case NAND_READ_LOCATION_LAST_CW_0:
+ return &regs->read_location_last0;
+ case NAND_READ_LOCATION_LAST_CW_1:
+ return &regs->read_location_last1;
+ case NAND_READ_LOCATION_LAST_CW_2:
+ return &regs->read_location_last2;
+ case NAND_READ_LOCATION_LAST_CW_3:
+ return &regs->read_location_last3;
default:
return NULL;
}
@@ -662,16 +692,27 @@ static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
}
/* helper to configure address register values */
-static void set_address(struct qcom_nand_host *host, u16 column, int page)
+static int set_address(struct qcom_nand_host *host, u16 column, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u32 offs = page << chip->page_shift;
+ int i, j;
+
+ /* Skip touching the secure regions if present */
+ for (i = 0, j = 0; i < host->nr_sec_regions; i++, j += 2) {
+ if (offs >= host->sec_regions[j] &&
+ (offs <= host->sec_regions[j] + host->sec_regions[j + 1]))
+ return -EIO;
+ }
if (chip->options & NAND_BUSWIDTH_16)
column >>= 1;
nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
+
+ return 0;
}
/*
@@ -719,9 +760,14 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
- if (read)
- nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
- host->cw_data : host->cw_size, 1);
+ if (read) {
+ if (nandc->props->qpic_v2)
+ nandc_set_read_loc_last(nandc, 0, 0, host->use_ecc ?
+ host->cw_data : host->cw_size, 1);
+ else
+ nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
+ host->cw_data : host->cw_size, 1);
+ }
}
/*
@@ -1094,11 +1140,16 @@ static void config_nand_page_read(struct qcom_nand_controller *nandc)
* before reading each codeword in NAND page.
*/
static void
-config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
+config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc, bool last_cw)
{
- if (nandc->props->is_bam)
- write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
- NAND_BAM_NEXT_SGL);
+ if (nandc->props->is_bam) {
+ if (nandc->props->qpic_v2 && last_cw)
+ write_reg_dma(nandc, NAND_READ_LOCATION_LAST_CW_0, 4,
+ NAND_BAM_NEXT_SGL);
+ else
+ write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
+ NAND_BAM_NEXT_SGL);
+ }
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
@@ -1118,10 +1169,10 @@ config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
*/
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
- bool use_ecc)
+ bool use_ecc, bool last_cw)
{
config_nand_page_read(nandc);
- config_nand_cw_read(nandc, use_ecc);
+ config_nand_cw_read(nandc, use_ecc, last_cw);
}
/*
@@ -1215,7 +1266,7 @@ static int nandc_param(struct qcom_nand_host *host)
nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
- config_nand_single_cw_page_read(nandc, false);
+ config_nand_single_cw_page_read(nandc, false, false);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
nandc->buf_count, 0);
@@ -1491,13 +1542,13 @@ static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
WARN_ON(column != 0);
host->use_ecc = true;
- set_address(host, 0, page_addr);
+ ret = set_address(host, 0, page_addr);
update_rw_regs(host, ecc->steps, true);
break;
case NAND_CMD_SEQIN:
WARN_ON(column != 0);
- set_address(host, 0, page_addr);
+ ret = set_address(host, 0, page_addr);
break;
case NAND_CMD_PAGEPROG:
@@ -1615,7 +1666,10 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
host->use_ecc = false;
clear_bam_transaction(nandc);
- set_address(host, host->cw_size * cw, page);
+ ret = set_address(host, host->cw_size * cw, page);
+ if (ret)
+ return ret;
+
update_rw_regs(host, 1, true);
config_nand_page_read(nandc);
@@ -1633,19 +1687,32 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
}
if (nandc->props->is_bam) {
- nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
- read_loc += data_size1;
+ if (nandc->props->qpic_v2 && cw == (ecc->steps - 1)) {
+ nandc_set_read_loc_last(nandc, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ nandc_set_read_loc_last(nandc, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ nandc_set_read_loc_last(nandc, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
- nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
- read_loc += oob_size1;
+ nandc_set_read_loc_last(nandc, 3, read_loc, oob_size2, 1);
+ } else {
+ nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
- nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
- read_loc += data_size2;
+ nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
- nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
+ nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
+ }
}
- config_nand_cw_read(nandc, false);
+ config_nand_cw_read(nandc, false, cw == ecc->steps - 1 ? true : false);
read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
@@ -1873,18 +1940,31 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
if (nandc->props->is_bam) {
if (data_buf && oob_buf) {
- nandc_set_read_loc(nandc, 0, 0, data_size, 0);
- nandc_set_read_loc(nandc, 1, data_size,
- oob_size, 1);
+ if (nandc->props->qpic_v2 && i == (ecc->steps - 1)) {
+ nandc_set_read_loc_last(nandc, 0, 0, data_size, 0);
+ nandc_set_read_loc_last(nandc, 1, data_size,
+ oob_size, 1);
+ } else {
+ nandc_set_read_loc(nandc, 0, 0, data_size, 0);
+ nandc_set_read_loc(nandc, 1, data_size,
+ oob_size, 1);
+ }
} else if (data_buf) {
- nandc_set_read_loc(nandc, 0, 0, data_size, 1);
+ if (nandc->props->qpic_v2 && i == (ecc->steps - 1))
+ nandc_set_read_loc_last(nandc, 0, 0, data_size, 1);
+ else
+ nandc_set_read_loc(nandc, 0, 0, data_size, 1);
} else {
- nandc_set_read_loc(nandc, 0, data_size,
- oob_size, 1);
+ if (nandc->props->qpic_v2 && i == (ecc->steps - 1))
+ nandc_set_read_loc_last(nandc, 0, data_size,
+ oob_size, 1);
+ else
+ nandc_set_read_loc(nandc, 0, data_size,
+ oob_size, 1);
}
}
- config_nand_cw_read(nandc, true);
+ config_nand_cw_read(nandc, true, i == ecc->steps - 1 ? true : false);
if (data_buf)
read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
@@ -1943,10 +2023,13 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
/* prepare a clean read buffer */
memset(nandc->data_buffer, 0xff, size);
- set_address(host, host->cw_size * (ecc->steps - 1), page);
+ ret = set_address(host, host->cw_size * (ecc->steps - 1), page);
+ if (ret)
+ return ret;
+
update_rw_regs(host, 1, true);
- config_nand_single_cw_page_read(nandc, host->use_ecc);
+ config_nand_single_cw_page_read(nandc, host->use_ecc, true);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
@@ -2005,12 +2088,16 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
host->use_ecc = true;
- set_address(host, 0, page);
+ ret = set_address(host, 0, page);
+ if (ret)
+ return ret;
+
update_rw_regs(host, ecc->steps, true);
return read_page_ecc(host, NULL, chip->oob_poi, page);
@@ -2188,7 +2275,10 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
0, mtd->oobavail);
- set_address(host, host->cw_size * (ecc->steps - 1), page);
+ ret = set_address(host, host->cw_size * (ecc->steps - 1), page);
+ if (ret)
+ return ret;
+
update_rw_regs(host, 1, false);
config_nand_page_write(nandc);
@@ -2267,7 +2357,10 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
/* prepare write */
host->use_ecc = false;
- set_address(host, host->cw_size * (ecc->steps - 1), page);
+ ret = set_address(host, host->cw_size * (ecc->steps - 1), page);
+ if (ret)
+ return ret;
+
update_rw_regs(host, 1, false);
config_nand_page_write(nandc);
@@ -2830,7 +2923,8 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct device *dev = nandc->dev;
- int ret;
+ struct property *prop;
+ int ret, length, nr_elem;
ret = of_property_read_u32(dn, "reg", &host->cs);
if (ret) {
@@ -2872,6 +2966,24 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
/* set up initial status value */
host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+ /*
+ * Look for secure regions in the NAND chip. These regions are supposed
+ * to be protected by a secure element like Trustzone. So the read/write
+ * accesses to these regions will be blocked in the runtime by this
+ * driver.
+ */
+ prop = of_find_property(dn, "secure-regions", &length);
+ if (prop) {
+ nr_elem = length / sizeof(u32);
+ host->nr_sec_regions = nr_elem / 2;
+
+ host->sec_regions = devm_kcalloc(dev, nr_elem, sizeof(u32), GFP_KERNEL);
+ if (!host->sec_regions)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dn, "secure-regions", host->sec_regions, nr_elem);
+ }
+
ret = nand_scan(chip, 1);
if (ret)
return ret;
@@ -2898,7 +3010,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
struct device *dev = nandc->dev;
struct device_node *dn = dev->of_node, *child;
struct qcom_nand_host *host;
- int ret;
+ int ret = -ENODEV;
for_each_available_child_of_node(dn, child) {
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -2916,10 +3028,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
list_add_tail(&host->node, &nandc->host_list);
}
- if (list_empty(&nandc->host_list))
- return -ENODEV;
-
- return 0;
+ return ret;
}
/* parse custom DT properties here */
@@ -2992,7 +3101,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
nandc->base_dma = dma_map_resource(dev, res->start,
resource_size(res),
DMA_BIDIRECTIONAL, 0);
- if (!nandc->base_dma)
+ if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;
ret = qcom_nandc_alloc(nandc);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 22c5529e9a65..d8e869652a25 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -180,6 +180,17 @@ config PCIE_QCOM
PCIe controller uses the DesignWare core plus Qualcomm-specific
hardware wrappers.
+config PCIE_QCOM_EP
+	bool "Qualcomm PCIe endpoint controller"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+	  Say Y here to enable PCIe endpoint controller support on
+ Qualcomm SoCs. The PCIe controller uses the DesignWare core plus
+ Qualcomm-specific hardware wrappers.
+
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
depends on ARCH_MVEBU || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index a751553fa0db..27db5d542d53 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1c25d8337151..8d028a88b375 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
}
}
+ dw_pcie_iatu_detect(pci);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 7e55b2b66182..24192b40e3a2 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -398,6 +398,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err_free_msi;
}
+ dw_pcie_iatu_detect(pci);
dw_pcie_setup_rc(pp);
dw_pcie_msi_init(pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 004cb860e266..a945f0c0e73d 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
pci->num_ob_windows = ob;
}
-void dw_pcie_setup(struct dw_pcie *pci)
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
- u32 val;
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
if (pci->version >= 0x480A || (!pci->version &&
@@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
pci->num_ob_windows, pci->num_ib_windows);
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+ u32 val;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7247c8b01f04..7d6e9b7576be 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
new file mode 100644
index 000000000000..7aaacc4c9e4c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -0,0 +1,1250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/reset.h>
+#include <linux/delay.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-epc.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_domain.h>
+#include <asm/io.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include "pcie-designware.h"
+
+#define PCIE_EP_PARF_SYS_CTRL 0x00
+#define PCIE_EP_PARF_DB_CTRL 0x10
+#define PCIE_EP_PARF_PM_CTRL 0x20
+#define PCIE_EP_PARF_PM_STTS 0x24
+#define PCIE_EP_PARF_PHY_CTRL 0x40
+#define PCIE_EP_PARF_PHY_REFCLK 0x4C
+#define PCIE_EP_PARF_CONFIG_BITS 0x50
+#define PCIE_EP_PARF_TEST_BUS 0xE4
+#define PCIE_EP_PARF_MHI_BASE_ADDR_LOWER 0x178
+#define PCIE_EP_PARF_MHI_BASE_ADDR_UPPER 0x17c
+#define PCIE_EP_PARF_MSI_GEN 0x188
+#define PCIE_EP_PARF_DEBUG_INT_EN 0x190
+#define PCIE_EP_PARF_MHI_IPA_DBS 0x198
+#define PCIE_EP_PARF_MHI_IPA_CDB_TARGET_LOWER 0x19C
+#define PCIE_EP_PARF_MHI_IPA_EDB_TARGET_LOWER 0x1A0
+#define PCIE_EP_PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1A4
+#define PCIE_EP_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
+#define PCIE_EP_PARF_Q2A_FLUSH 0x1AC
+#define PCIE_EP_PARF_LTSSM 0x1B0
+#define PCIE_EP_PARF_CFG_BITS 0x210
+#define PCIE_EP_PARF_LTR_MSI_EXIT_L1SS 0x214
+#define PCIE_EP_PARF_INT_ALL_STATUS 0x224
+#define PCIE_EP_PARF_INT_ALL_CLEAR 0x228
+#define PCIE_EP_PARF_INT_ALL_MASK 0x22C
+#define PCIE_EP_PARF_SLV_ADDR_MSB_CTRL 0x2C0
+#define PCIE_EP_PARF_DBI_BASE_ADDR 0x350
+#define PCIE_EP_PARF_DBI_BASE_ADDR_HI 0x354
+#define PCIE_EP_PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PCIE_EP_PARF_SLV_ADDR_SPACE_SIZE_HI 0x35C
+#define PCIE_EP_PARF_ATU_BASE_ADDR 0x634
+#define PCIE_EP_PARF_ATU_BASE_ADDR_HI 0x638
+#define PCIE_EP_PARF_DEVICE_TYPE 0x1000
+
+#define PCIE_EP_ELBI_VERSION 0x00
+#define PCIE_EP_ELBI_SYS_CTRL 0x04
+#define PCIE_EP_ELBI_SYS_STTS 0x08
+#define PCIE_EP_ELBI_CS2_ENABLE 0xA4
+
+#define PCIE_EP_DEVICE_ID_VENDOR_ID 0x00
+#define PCIE_EP_DEVICE_ID_MASK 0xffff0000
+#define PCIE_EP_VENDOR_ID_MASK 0xffff
+
+#define PCIE_EP_COMMAND_STATUS 0x04
+
+#define PCIE_EP_CLASS_CODE_REV_ID 0x08
+#define PCIE_EP_CLASS_CODE_REV_ID_BASE_CLASS_CODE_MASK 0xff000000
+#define PCIE_EP_CLASS_CODE_REV_ID_SUBCLASS_CODE_MASK 0xff0000
+#define PCIE_EP_CLASS_CODE_REV_ID_PROG_IFACE_MASK 0xff00
+#define PCIE_EP_CLASS_CODE_REV_ID_REV_ID_MASK 0xff
+
+#define PCIE_EP_BIST_HDR_TYPE 0x0C
+#define PCIE_EP_BIST_HDR_TYPE_CACHE_LINE_SIZE_MASK 0xff
+
+#define PCIE_EP_DWC_BAR0 0x10
+#define PCIE_EP_DWC_BAR1 0x14
+#define PCIE_EP_DWC_BAR2 0x18
+#define PCIE_EP_DWC_BAR3 0x1c
+#define PCIE_EP_DWC_BAR4 0x20
+#define PCIE_EP_DWC_BAR5 0x24
+
+#define PCIE_EP_SUBSYSTEM 0x2c
+#define PCIE_EP_SUBSYS_DEV_ID_MASK 0xffff0000
+#define PCIE_EP_SUBSYS_VENDOR_ID_MASK 0xffff
+
+#define PCIE_EP_CAP_ID_NXT_PTR 0x40
+#define PCIE_EP_CON_STATUS 0x44
+#define PCIE_EP_MSI_CAP_ID_NEXT_CTRL 0x50
+#define PCIE_EP_MSI_LOWER 0x54
+#define PCIE_EP_MSI_UPPER 0x58
+#define PCIE_EP_MSI_DATA 0x5C
+#define PCIE_EP_MSI_MASK 0x60
+#define PCIE_EP_DEVICE_CAPABILITIES 0x74
+#define PCIE_EP_MASK_EP_L1_ACCPT_LATENCY 0xE00
+#define PCIE_EP_MASK_EP_L0S_ACCPT_LATENCY 0x1C0
+#define PCIE_EP_LINK_CAPABILITIES 0x7C
+#define PCIE_EP_MASK_CLOCK_POWER_MAN 0x40000
+#define PCIE_EP_MASK_L1_EXIT_LATENCY 0x38000
+#define PCIE_EP_MASK_L0S_EXIT_LATENCY 0x7000
+#define PCIE_EP_CAP_LINKCTRLSTATUS 0x80
+#define PCIE_EP_DEVICE_CONTROL2_STATUS2 0x98
+#define PCIE_EP_LINK_CONTROL2_LINK_STATUS2 0xA0
+#define PCIE_EP_L1SUB_CAPABILITY 0x154
+#define PCIE_EP_L1SUB_CONTROL1 0x158
+#define PCIE_EP_ACK_F_ASPM_CTRL_REG 0x70C
+#define PCIE_EP_MASK_ACK_N_FTS 0xff00
+#define PCIE_EP_MISC_CONTROL_1 0x8BC
+
+#define PCIE_EP_PLR_IATU_VIEWPORT 0x900
+#define PCIE_EP_PLR_IATU_CTRL1 0x904
+#define PCIE_EP_PLR_IATU_CTRL2 0x908
+#define PCIE_EP_PLR_IATU_LBAR 0x90C
+#define PCIE_EP_PLR_IATU_UBAR 0x910
+#define PCIE_EP_PLR_IATU_LAR 0x914
+#define PCIE_EP_PLR_IATU_LTAR 0x918
+#define PCIE_EP_PLR_IATU_UTAR 0x91c
+
+#define PCIE_EP_IATU_BASE(n) (n * 0x200)
+
+#define PCIE_EP_IATU_I_CTRL1(n) (PCIE_EP_IATU_BASE(n) + 0x100)
+#define PCIE_EP_IATU_I_CTRL2(n) (PCIE_EP_IATU_BASE(n) + 0x104)
+#define PCIE_EP_IATU_I_LBAR(n) (PCIE_EP_IATU_BASE(n) + 0x108)
+#define PCIE_EP_IATU_I_UBAR(n) (PCIE_EP_IATU_BASE(n) + 0x10c)
+#define PCIE_EP_IATU_I_LAR(n) (PCIE_EP_IATU_BASE(n) + 0x110)
+#define PCIE_EP_IATU_I_LTAR(n) (PCIE_EP_IATU_BASE(n) + 0x114)
+#define PCIE_EP_IATU_I_UTAR(n) (PCIE_EP_IATU_BASE(n) + 0x118)
+
+#define PCIE_EP_MHICFG 0x110
+#define PCIE_EP_BHI_EXECENV 0x228
+#define PCIE_EP_MHIVER 0x108
+#define PCIE_EP_MHICTRL 0x138
+#define PCIE_EP_MHISTATUS 0x148
+#define PCIE_EP_BHI_VERSION_LOWER 0x200
+#define PCIE_EP_BHI_VERSION_UPPER 0x204
+#define PCIE_EP_BHI_INTVEC 0x220
+
+#define PCIE_EP_AUX_CLK_FREQ_REG 0xB40
+
+#define PERST_TIMEOUT_US_MIN 1000
+#define PERST_TIMEOUT_US_MAX 1000
+#define PERST_CHECK_MAX_COUNT 30000
+#define LINK_UP_TIMEOUT_US_MIN 1000
+#define LINK_UP_TIMEOUT_US_MAX 1000
+#define LINK_UP_CHECK_MAX_COUNT 30000
+#define BME_TIMEOUT_US_MIN 1000
+#define BME_TIMEOUT_US_MAX 1000
+#define BME_CHECK_MAX_COUNT 30000
+#define PHY_STABILIZATION_DELAY_US_MIN 1000
+#define PHY_STABILIZATION_DELAY_US_MAX 1000
+#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
+#define REFCLK_STABILIZATION_DELAY_US_MAX 1000
+#define PHY_READY_TIMEOUT_COUNT 30000
+#define MSI_EXIT_L1SS_WAIT 10
+#define MSI_EXIT_L1SS_WAIT_MAX_COUNT 100
+#define XMLH_LINK_UP 0x400
+#define PARF_XMLH_LINK_UP 0x40000000
+#define EP_CORE_RESET_TIME_MIN 1000
+#define EP_CORE_RESET_TIME_MAX 1005
+#define EP_CORE_LINKDOWN 0xffffffff
+#define EP_MHICFG 0x2800880
+#define EP_BHI_EXECENV 2
+#define EP_MHICTRL_INIT 0x0
+#define EP_MHISTATUS_INIT 0x0
+#define EP_MHIVER_INIT 0x1000000
+#define EP_BHI_VERSION_LOWER_DATA 0x2
+#define EP_BHI_VERSION_UPPER_DATA 0x1
+#define EP_BHI_INTVEC_VAL 0xffffffff
+#define EP_PCIE_INT_MAX 13
+/* 2ms */
+#define WAKE_DELAY_US 2000
+#define TCSR_PERST_SEPARATION_ENABLE 0x270
+
+#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+struct qcom_pcie_ep *pcie_ep;
+struct pcie_ep_plat_data *pcie_ep_pdata;
+
+enum qcom_pcie_ep_irq {
+ EP_PCIE_INT_PM_TURNOFF,
+ EP_PCIE_INT_DSTATE_CHANGE,
+ EP_PCIE_INT_L1SUB_TIMEOUT,
+ EP_PCIE_INT_LINK_UP,
+ EP_PCIE_INT_LINK_DOWN,
+ EP_PCIE_INT_BRIDGE_FLUSH_N,
+ EP_PCIE_INT_BME,
+ EP_PCIE_INT_MHI_A7,
+ EP_PCIE_INT_GLOBAL,
+ EP_PCIE_MAX_IRQ,
+};
+
+enum qcom_ep_pcie_link_state {
+ /* Controller is configured but LTSSM is not enabled */
+ EP_PCIE_LINK_CONFIGURED,
+ /* Parf register link_up bit is set */
+ EP_PCIE_LINK_UP,
+ /* LTSSM state indicates link is up */
+ EP_PCIE_LINK_LTSSM_EN,
+ /* Link enumerated, i.e link is up, BME is set */
+ EP_PCIE_LINK_ENUMERATED,
+	/* Link enumerated, i.e. link is up and BME is set */
+ EP_PCIE_LINK_DISABLE,
+ /* Link down detected */
+ EP_PCIE_LINK_DOWN,
+};
+
+struct qcom_pcie_ep_ops {
+ /* Configure vregs and clocks */
+ int (*enable_resources)(struct qcom_pcie_ep *pcie);
+ void (*disable_resources)(struct qcom_pcie_ep *pcie);
+ /* Initialize MHI MMIO */
+ void (*mmio_init)(struct qcom_pcie_ep *pcie);
+ /* Initialize PCIe controller core */
+ int (*core_init)(struct qcom_pcie_ep *pcie);
+ /* Reset PCIe controller core */
+ int (*core_reset)(struct qcom_pcie_ep *pcie);
+ /* Enable PCIe global IRQ's */
+ void (*configure_irq)(struct qcom_pcie_ep *pcie);
+ /* GPIO related functions */
+ void (*toggle_wake)(struct qcom_pcie_ep *pcie);
+ int (*check_perst)(struct qcom_pcie_ep *pcie);
+ /* Update and enumerate the internal PCIe link status */
+ void (*enumerate)(struct qcom_pcie_ep *pcie);
+ /* Start link training sequence */
+ void (*enable_ltssm)(struct qcom_pcie_ep *pcie);
+ /* Misc: Configure TCSR related PCIe configuration */
+ void (*configure_tcsr)(struct qcom_pcie_ep *pcie);
+ /* Check if BME is set */
+ void (*check_bme)(struct qcom_pcie_ep *pcie);
+ /* Check if link is already configured in Bootloader */
+ int (*pcie_early_init)(struct qcom_pcie_ep *pcie);
+};
+
+static const struct pci_epf_header mdm_prairie_ep_header = {
+ .vendorid = 0x17cb,
+ .deviceid = 0x306,
+ .revid = 0x0,
+ .progif_code = 0x0,
+ .subclass_code = 0x0,
+ .baseclass_code = 0xff,
+ .cache_line_size = 0x10,
+ .subsys_vendor_id = 0x0,
+ .subsys_id = 0x0,
+};
+
+struct pcie_ep_plat_data {
+ u32 link_speed;
+ struct pci_epf_header *header;
+ const struct qcom_pcie_ep_ops *ops;
+};
+
+struct qcom_pcie_ep_resources {
+ struct clk *ahb_clk;
+ struct clk *axi_m;
+ struct clk *axi_s;
+ struct clk *aux_clk; /* Set rate: 1000000 */
+ struct clk *ldo;
+ struct clk *sleep_clk;
+ struct clk *slave_q2a_axi_clk;
+ struct clk *pipe_clk; /* Set rate: 62500000 */
+
+ struct reset_control *core_reset;
+
+ struct regulator *vdda;
+ struct regulator *vdda_phy;
+ struct device *gdsc;
+
+ struct gpio_desc *reset;
+ struct gpio_desc *wake;
+ struct gpio_desc *clkreq;
+};
+
+struct qcom_pcie_ep_state {
+ bool mmio_init;
+ bool core_configured;
+ /* Store subsys id for restore after D3_COLD */
+ u32 subsys_id;
+ u32 sys_id;
+ /* Local state of the link state */
+ enum qcom_ep_pcie_link_state link_state;
+};
+
+struct qcom_pcie_ep {
+ struct device *dev;
+
+ struct dw_pcie *pci;
+
+ void __iomem *parf;
+ void __iomem *mmio;
+ void __iomem *msi;
+ void __iomem *elbi;
+ void __iomem *tcsr;
+ void __iomem *phys_base;
+ int phys_addr_size;
+
+ struct qcom_pcie_ep_resources *res;
+
+ struct phy *phy;
+
+ struct qcom_pcie_ep_state state;
+
+ const struct pcie_ep_plat_data *data;
+
+ spinlock_t res_lock;
+
+ struct mutex lock;
+
+ int perst_irq;
+};
+
+static void qcom_pcie_ep_enable_ltssm(struct qcom_pcie_ep *pcie_ep)
+{
+ u32 reg;
+
+ /* enable link training */
+ reg = readl(pcie_ep->parf + PCIE_EP_PARF_LTSSM);
+ reg |= BIT(8);
+ writel_relaxed(reg, pcie_ep->parf + PCIE_EP_PARF_LTSSM);
+}
+
+static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = pcie_ep->dev;
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+ int ret = 0;
+
+ ret = reset_control_assert(res->core_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert core\n");
+ return ret;
+ }
+
+ usleep_range(EP_CORE_RESET_TIME_MIN, EP_CORE_RESET_TIME_MAX);
+
+ ret = reset_control_deassert(res->core_reset);
+ if (ret) {
+		dev_err(dev, "cannot deassert core\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_pcie_early_init(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = pcie_ep->pci;
+ u32 reg;
+
+ /* Check link status */
+ reg = readl(pcie_ep->parf + PCIE_EP_PARF_PM_STTS);
+ pr_info("pcie_ep: parf_pm_stts:0x%x\n", reg);
+ if (reg & PARF_XMLH_LINK_UP) {
+ pr_info("pcie_ep: Link already initialized in bootloader\n");
+ /*
+ * Read and store subsystem ID set in bootloader
+ * and restore it during D3 to D0 state.
+ */
+ pcie_ep->state.subsys_id = dw_pcie_readl_dbi(pci, PCIE_EP_SUBSYSTEM);
+ /* MMIO is already initialized in bootloader */
+ pcie_ep->state.mmio_init = true;
+ pcie_ep->state.link_state = EP_PCIE_LINK_UP;
+ pcie_ep->data->ops->check_bme(pcie_ep);
+
+ return 0;
+ } else {
+ reg = readl(pcie_ep->parf + PCIE_EP_PARF_LTSSM) & BIT(8);
+ if (reg) {
+ pr_info("pcie_ep: Link is not up with LTSSM set\n");
+ return -ENODEV;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int qcom_pcie_ep_check_perst(struct qcom_pcie_ep *pcie_ep)
+{
+ u32 retries = 0;
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+ /* wait for host side to deassert PERST */
+ do {
+ if (gpiod_get_value(res->reset) == 1)
+ break;
+ retries++;
+ usleep_range(PERST_TIMEOUT_US_MIN, PERST_TIMEOUT_US_MAX);
+ } while (retries < PERST_CHECK_MAX_COUNT);
+
+ pr_info("pcie_ep: number of PERST retries: %d\n", retries);
+
+ if (retries == PERST_CHECK_MAX_COUNT)
+ return -ENODEV;
+ else
+ return 0;
+}
+
+static void qcom_pcie_ep_toggle_wake(struct qcom_pcie_ep *pcie_ep)
+{
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+
+ /* assert PCIe WAKE# */
+ pr_info("pcie_ep: WAKE# GPIO initial:%d\n",
+ gpiod_get_value(res->wake));
+
+ gpiod_set_value_cansleep(res->wake, 0);
+}
+
+static void qcom_pcie_ep_configure_irq(struct qcom_pcie_ep *pcie_ep)
+{
+ u32 reg;
+
+ writel_relaxed(0, pcie_ep->parf + PCIE_EP_PARF_INT_ALL_MASK);
+ reg = BIT(EP_PCIE_INT_LINK_DOWN) |
+ BIT(EP_PCIE_INT_BME) |
+ BIT(EP_PCIE_INT_PM_TURNOFF) |
+ BIT(EP_PCIE_INT_DSTATE_CHANGE) |
+ BIT(EP_PCIE_INT_LINK_UP);
+ writel_relaxed(reg, pcie_ep->parf + PCIE_EP_PARF_INT_ALL_MASK);
+ pr_info("pcie_ep: PARF interrupt enable:0x%x\n", reg);
+}
+
+static void qcom_pcie_ep_mmio_init(struct qcom_pcie_ep *ep)
+{
+ if (ep->state.mmio_init) {
+		pr_info("EP MMIO already initialized\n");
+ return;
+ }
+
+ writel_relaxed(EP_MHICFG, ep->mmio + PCIE_EP_MHICFG);
+ writel_relaxed(EP_BHI_EXECENV, ep->mmio + PCIE_EP_BHI_EXECENV);
+ writel_relaxed(EP_MHICTRL_INIT, ep->mmio + PCIE_EP_MHICTRL);
+ writel_relaxed(EP_MHISTATUS_INIT, ep->mmio + PCIE_EP_MHISTATUS);
+ writel_relaxed(EP_MHIVER_INIT, ep->mmio + PCIE_EP_MHIVER);
+ writel_relaxed(EP_BHI_VERSION_LOWER_DATA,
+ ep->mmio + PCIE_EP_BHI_VERSION_LOWER);
+ writel_relaxed(EP_BHI_VERSION_UPPER_DATA,
+ ep->mmio + PCIE_EP_BHI_VERSION_UPPER);
+ writel_relaxed(EP_BHI_INTVEC_VAL, ep->mmio + PCIE_EP_BHI_INTVEC);
+
+ ep->state.mmio_init = true;
+}
+
+#if 0
+static void qcom_pcie_ep_wake_assert(struct qcom_pcie_ep *ep)
+{
+ gpiod_set_value_cansleep(ep->wake, 1);
+ usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+}
+#endif
+
+static void qcom_pcie_ep_wake_deassert(struct qcom_pcie_ep *pcie_ep)
+{
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+
+ gpiod_set_value_cansleep(res->wake, 0);
+ usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+}
+
+static const struct pci_epc_features qcom_pcie_epc_features = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
+{
+ return &qcom_pcie_epc_features;
+}
+
+static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static struct dw_pcie_ep_ops pci_ep_ops = {
+ .ep_init = qcom_pcie_ep_init,
+ .get_features = qcom_pcie_epc_get_features,
+};
+
+static int qcom_pcie_ep_enable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+ struct device *dev = pcie_ep->dev;
+ int ret = 0;
+
+	pr_info("%s: %d\n", __func__, __LINE__);
+ ret = regulator_enable(res->vdda);
+ if (ret) {
+ dev_err(dev, "Cannot enable vdda\n");
+ return ret;
+ }
+
+ ret = regulator_enable(res->vdda_phy);
+ if (ret) {
+ dev_err(dev, "Cannot prepare vdda phy\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->ahb_clk);
+ if (ret) {
+ dev_err(dev, "Cannot prepare AHB clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "Cannot prepare aux clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->axi_m);
+ if (ret) {
+ dev_err(dev, "Cannot prepare axi master clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->axi_s);
+ if (ret) {
+ dev_err(dev, "Cannot prepare axi slave clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->ldo);
+ if (ret) {
+ dev_err(dev, "Cannot prepare LDO clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->sleep_clk);
+ if (ret) {
+ dev_err(dev, "Cannot prepare sleep clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->slave_q2a_axi_clk);
+ if (ret) {
+ dev_err(dev, "Cannot prepare slave_bus clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_ep_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+
+ clk_disable_unprepare(res->slave_q2a_axi_clk);
+ clk_disable_unprepare(res->pipe_clk);
+ clk_disable_unprepare(res->sleep_clk);
+ clk_disable_unprepare(res->ldo);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->axi_s);
+ clk_disable_unprepare(res->axi_m);
+ clk_disable_unprepare(res->ahb_clk);
+ regulator_disable(res->vdda_phy);
+ regulator_disable(res->vdda);
+}
+
+static void qcom_pcie_ep_enumerate(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = pcie_ep->pci;
+ if (pcie_ep->state.link_state == EP_PCIE_LINK_ENUMERATED) {
+ pr_info("PCIe EP link already enumerated\n");
+ return;
+ }
+
+ pcie_ep->state.sys_id = dw_pcie_readl_dbi(pci, 0);
+ pcie_ep->state.link_state = EP_PCIE_LINK_ENUMERATED;
+}
+
+static void qcom_pcie_ep_check_bme(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = pcie_ep->pci;
+ /*
+	 * De-assert WAKE# once the link is up. WAKE# is asserted again
+	 * only when the device needs to send data to the host, waking the
+	 * link back up from L2/L3.
+ */
+ qcom_pcie_ep_wake_deassert(pcie_ep);
+
+ dw_pcie_writel_dbi(pci, PCIE_EP_AUX_CLK_FREQ_REG, 0x14);
+ if (dw_pcie_readl_dbi(pci, PCIE_EP_COMMAND_STATUS) & BIT(2)) {
+ pr_info("pcie_ep: BME is set\n");
+ pcie_ep->data->ops->enumerate(pcie_ep);
+ }
+}
+
+static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
+{
+ u32 reg;
+
+ reg = readl_relaxed(pcie_ep->tcsr + 0x258);
+	pr_info("pcie_ep: TCSR PERST_EN val: 0x%x\n", reg);
+ writel_relaxed(reg, pcie_ep->tcsr + 0x258);
+
+ reg = readl_relaxed(pcie_ep->tcsr + TCSR_PERST_SEPARATION_ENABLE);
+	pr_info("pcie_ep: TCSR PERST_SEP_EN val: 0x%x\n", reg);
+ writel_relaxed(reg, pcie_ep->tcsr + TCSR_PERST_SEPARATION_ENABLE);
+}
+
+static int qcom_pcie_confirm_linkup(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ u32 reg;
+
+ reg = readl_relaxed(pcie_ep->elbi + PCIE_EP_ELBI_SYS_STTS);
+ pr_info("pcie_ep:elbi_sys_stts:0x%x\n", reg);
+ if (!(reg & XMLH_LINK_UP))
+ return -ENODEV;
+
+ return 0;
+}
+
+static inline void ep_pcie_write_mask(void __iomem *addr,
+ u32 clear_mask, u32 set_mask)
+{
+ u32 val;
+
+ val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
+ writel_relaxed(val, addr);
+	/* ensure the register write goes through before the next register operation */
+ wmb();
+}
+
+static int qcom_pcie_ep_core_init(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = pcie_ep->pci;
+ struct pci_epc *epc = pci->ep.epc;
+ struct pci_epf_header *hdr = pcie_ep->data->header;
+
+ /* enable debug IRQ */
+ writel_relaxed((BIT(3) | BIT(2) | BIT(1)),
+ pcie_ep->parf + PCIE_EP_PARF_DEBUG_INT_EN);
+
+ /* Configure PCIe to endpoint mode */
+ writel_relaxed(0x0, pcie_ep->parf + PCIE_EP_PARF_DEVICE_TYPE);
+
+ /* adjust DBI base address */
+ writel_relaxed(0x3FFFE000, pcie_ep->parf + PCIE_EP_PARF_DBI_BASE_ADDR);
+
+ /* Configure PCIe core to support 1GB aperture */
+ writel_relaxed(0x40000000, pcie_ep->parf + PCIE_EP_PARF_SLV_ADDR_SPACE_SIZE);
+
+ writel_relaxed(0x101, pcie_ep->parf + PCIE_EP_PARF_PM_CTRL);
+
+ /* Configure Slave address, DBI and iATU */
+ writel_relaxed(0x0, pcie_ep->parf + PCIE_EP_PARF_SLV_ADDR_MSB_CTRL);
+ writel_relaxed(0x200, pcie_ep->parf + PCIE_EP_PARF_SLV_ADDR_SPACE_SIZE_HI);
+ writel_relaxed(0x0, pcie_ep->parf + PCIE_EP_PARF_SLV_ADDR_SPACE_SIZE);
+ writel_relaxed(0x100, pcie_ep->parf + PCIE_EP_PARF_DBI_BASE_ADDR_HI);
+ writel_relaxed(pci->atu_base, pcie_ep->parf + PCIE_EP_PARF_DBI_BASE_ADDR);
+ writel_relaxed(0x100, pcie_ep->parf + PCIE_EP_PARF_ATU_BASE_ADDR_HI);
+ writel_relaxed(pci->atu_base, pcie_ep->parf + PCIE_EP_PARF_ATU_BASE_ADDR);
+
+ dw_pcie_writel_dbi(pci, PCIE_EP_LINK_CONTROL2_LINK_STATUS2,
+ pcie_ep->data->link_speed);
+
+ /* Read halts write */
+ writel_relaxed(0, pcie_ep->parf + PCIE_EP_PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+ /* Write after write halt */
+ writel_relaxed(BIT(31), pcie_ep->parf + PCIE_EP_PARF_AXI_MSTR_WR_ADDR_HALT);
+ /* Q2A flush disable */
+ writel_relaxed(0, pcie_ep->parf + PCIE_EP_PARF_Q2A_FLUSH);
+ /* Disable the DBI Wakeup */
+ writel_relaxed(BIT(11), pcie_ep->parf + PCIE_EP_PARF_SYS_CTRL);
+ /* Disable the debouncers */
+ writel_relaxed(0x73, pcie_ep->parf + PCIE_EP_PARF_DB_CTRL);
+ /* Disable core clock CGC */
+ writel_relaxed(BIT(6), pcie_ep->parf + PCIE_EP_PARF_SYS_CTRL);
+ /* Set AUX power to be on */
+ writel_relaxed(BIT(4), pcie_ep->parf + PCIE_EP_PARF_SYS_CTRL);
+ /* Request to exit from L1SS for MSI and LTR MSG */
+ writel_relaxed(BIT(1), pcie_ep->parf + PCIE_EP_PARF_CFG_BITS);
+
+ /* Update config space header information */
+ pci->ep.epc->ops->write_header(epc, 0, hdr);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ /* Set the PMC Register - to support PME in D0/D3hot/D3cold */
+ dw_pcie_writel_dbi(pci, PCIE_EP_CAP_ID_NXT_PTR,
+ (BIT(31) | BIT(30) | BIT(27)));
+ /* Set the Endpoint L0s Acceptable Latency to 1us (max) */
+ dw_pcie_writel_dbi(pci, PCIE_EP_DEVICE_CAPABILITIES, 0x7);
+ /* Set the L0s Exit Latency to 2us-4us = 0x6 */
+ /* Set the L1 Exit Latency to be 32us-64 us = 0x6 */
+ dw_pcie_writel_dbi(pci, PCIE_EP_LINK_CAPABILITIES, 0x6);
+ /* L1ss is supported */
+ dw_pcie_writel_dbi(pci, PCIE_EP_L1SUB_CAPABILITY, 0x1f);
+ /* Enable Clock Power Management */
+ dw_pcie_writel_dbi(pci, PCIE_EP_LINK_CAPABILITIES, 0x1);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ /* Set FTS value to match the PHY setting */
+ dw_pcie_writel_dbi(pci, PCIE_EP_ACK_F_ASPM_CTRL_REG, 0x80);
+ dw_pcie_writel_dbi(pci, PCIE_EP_AUX_CLK_FREQ_REG, 0x14);
+
+ /* Enable L1 */
+ writel_relaxed(BIT(5), pcie_ep->parf + PCIE_EP_PARF_PM_CTRL);
+
+ /* Configure aggregated IRQ's */
+ pcie_ep->data->ops->configure_irq(pcie_ep);
+
+ /* Configure MMIO */
+// pcie_ep->data->ops->mmio_init(pcie_ep);
+
+ return 0;
+}
+
+static int qcom_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct pci_epc *epc = pci->ep.epc;
+ struct pci_epf_bar epf_bar;
+ int ret;
+
+ if (pcie_ep->state.link_state == EP_PCIE_LINK_ENUMERATED) {
+ pr_err("Link is already enumerated");
+ return 0;
+ }
+
+ mutex_lock(&pcie_ep->lock);
+
+ /* Enable power and clocks */
+ ret = pcie_ep->data->ops->enable_resources(pcie_ep);
+ if (ret) {
+ pr_err("pcie_ep: Enable resources failed\n");
+ return ret;
+ }
+
+ /* Configure tcsr to avoid device reset during host reboot */
+ pcie_ep->data->ops->configure_tcsr(pcie_ep);
+
+ /* Check if link is initialized in bootloader */
+ ret = pcie_ep->data->ops->pcie_early_init(pcie_ep);
+ if (ret == -ENODEV) {
+ pr_info("pcie_ep: pcie early init failure %d\n", ret);
+ } else {
+ pr_info("pcie_ep: link initialized in bootloader\n");
+ goto exit;
+ }
+
+ /* Perform controller reset */
+ ret = pcie_ep->data->ops->core_reset(pcie_ep);
+ if (ret) {
+ pr_info("pcie_ep: Failed to reset the core\n");
+ goto disable_resource;
+ }
+
+ /* Assert WAKE# to RC to indicate device is ready */
+ pcie_ep->data->ops->toggle_wake(pcie_ep);
+
+ /* Check for PERST deassertion from host */
+ ret = pcie_ep->data->ops->check_perst(pcie_ep);
+ if (ret) {
+ pr_info("pcie_ep: Failed to detect perst deassert\n");
+ goto disable_resource;
+ }
+
+ /* Initialize PHY */
+ ret = phy_init(pcie_ep->phy);
+ if (ret) {
+ pr_info("pcie_ep: PHY init failed\n");
+ goto disable_resource;
+ }
+
+ /* TODO: check for phy is ready */
+
+
+ /* Initialize the controller */
+ ret = pcie_ep->data->ops->core_init(pcie_ep);
+ if (ret) {
+ pr_info("pcie_ep: Controller init failed\n");
+ goto disable_resource;
+ }
+
+ /* Set the BAR and program iATU */
+ epf_bar.phys_addr = pcie_ep->phys_base;
+ epf_bar.size = pcie_ep->phys_addr_size;
+ epf_bar.barno = BAR_0;
+ epf_bar.flags = PCI_BASE_ADDRESS_SPACE;
+ ret = pci->ep.epc->ops->set_bar(epc, 0, &epf_bar);
+ if (ret) {
+ pr_info("pcie_ep: setting BAR and ATU mapping failed\n");
+ goto disable_resource;
+ }
+
+ /* Enable LTSSM */
+ pcie_ep->data->ops->enable_ltssm(pcie_ep);
+
+ qcom_pcie_ep_wake_deassert(pcie_ep);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ pr_err("Link training failed");
+
+ mutex_unlock(&pcie_ep->lock);
+
+ return 0;
+
+disable_resource:
+ pcie_ep->data->ops->disable_resources(pcie_ep);
+exit:
+ mutex_unlock(&pcie_ep->lock);
+
+ return 0;
+}
+
+static void qcom_pcie_disable_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ return pcie_ep->data->ops->disable_resources(pcie_ep);
+}
+
+/* Device specific ops */
+static struct qcom_pcie_ep_ops ops_mdm = {
+ .enable_resources = qcom_pcie_ep_enable_resources,
+ .disable_resources = qcom_pcie_ep_disable_resources,
+ .mmio_init = qcom_pcie_ep_mmio_init,
+ .core_init = qcom_pcie_ep_core_init,
+ .core_reset = qcom_pcie_ep_core_reset,
+ .configure_irq = qcom_pcie_ep_configure_irq,
+ .toggle_wake = qcom_pcie_ep_toggle_wake,
+ .check_perst = qcom_pcie_ep_check_perst,
+ .enumerate = qcom_pcie_ep_enumerate,
+ .enable_ltssm = qcom_pcie_ep_enable_ltssm,
+ .configure_tcsr = qcom_pcie_ep_configure_tcsr,
+ .check_bme = qcom_pcie_ep_check_bme,
+ .pcie_early_init = qcom_pcie_ep_pcie_early_init,
+};
+
+static const struct pcie_ep_plat_data data_prairie_ep = {
+ .link_speed = 3,
+ .header = &mdm_prairie_ep_header,
+ .ops = &ops_mdm,
+};
+
+static irqreturn_t qcom_pcie_ep_clkreq_threaded_irq(int irq, void *data)
+{
+ pr_info("Received CLKREQ IRQ\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_pcie_ep_perst_threaded_irq(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = pcie_ep->pci;
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+ u32 perst;
+
+ perst = gpiod_get_value(res->reset);
+
+ pr_info("PCIe PERST is %sasserted\n", perst ? "de" : "");
+ if (perst) {
+ /* start work for link enumeration with the host side */
+ pr_info("Start enumeration due to PERST deassertion\n");
+ pci->ops->start_link(pci);
+ } else {
+ /* shutdown the link if the link is already on */
+ pr_info("Shutdown the PCIe link\n");
+ pci->ops->stop_link(pci);
+ }
+
+ /* Set trigger type based on the next expected value of perst gpio */
+ irq_set_irq_type(gpiod_to_irq(res->reset),
+ (perst ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH));
+
+ return IRQ_HANDLED;
+}
+
+/* Common DWC controller ops */
+static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_confirm_linkup,
+ .start_link = qcom_pcie_establish_link,
+ .stop_link = qcom_pcie_disable_link,
+};
+
+static irqreturn_t qcom_pcie_ep_global_threaded_irq(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = pcie_ep->pci;
+ u32 status = readl(pcie_ep->parf + PCIE_EP_PARF_INT_ALL_STATUS);
+ u32 mask = readl(pcie_ep->parf + PCIE_EP_PARF_INT_ALL_MASK);
+ u32 dstate, int_num;
+
+ writel_relaxed(status, pcie_ep->parf + PCIE_EP_PARF_INT_ALL_CLEAR);
+ status &= mask;
+
+ for (int_num = 1; int_num <= EP_PCIE_INT_MAX; int_num++) {
+ if (status & BIT(int_num)) {
+ switch (int_num) {
+ case EP_PCIE_INT_LINK_DOWN:
+ pr_info("linkdown event\n");
+ pcie_ep->data->ops->disable_resources(pcie_ep);
+ break;
+ case EP_PCIE_INT_BME:
+ pr_info("handle BME event\n");
+ pcie_ep->data->ops->enumerate(pcie_ep);
+ break;
+ case EP_PCIE_INT_PM_TURNOFF:
+ pr_info("handle PM Turn-off event\n");
+ pcie_ep->data->ops->disable_resources(pcie_ep);
+ break;
+ case EP_PCIE_INT_MHI_A7:
+ pr_info("received MHI A7 event\n");
+ break;
+ case EP_PCIE_INT_DSTATE_CHANGE:
+ dstate = dw_pcie_readl_dbi(
+ pci, PCIE_EP_CON_STATUS) & 0x3;
+ pr_info("Received D state:%x\n", dstate);
+ break;
+ case EP_PCIE_INT_LINK_UP:
+ pr_info("linkup event\n");
+ break;
+ case EP_PCIE_INT_L1SUB_TIMEOUT:
+ pr_info("L1ss timeout event\n");
+ break;
+ default:
+ pr_err("Unexpected event %d\n", int_num);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, "int_global");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq %s\n",
+ "int_global");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_ep_global_threaded_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "int_global", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get irq %s\n", "int_global");
+ return ret;
+ }
+
+ irq = gpiod_to_irq(res->reset);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_ep_perst_threaded_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ep_pcie_perst", pcie_ep);
+ if (ret) {
+		dev_err(&pdev->dev, "failed to request ep_pcie_perst IRQ\n");
+ return ret;
+ }
+ pcie_ep->perst_irq = irq;
+
+ irq = gpiod_to_irq(res->clkreq);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_ep_clkreq_threaded_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "ep_pcie_clkreq", pcie_ep);
+ if (ret) {
+		dev_err(&pdev->dev, "failed to request ep_pcie_clkreq IRQ\n");
+ return ret;
+ }
+	/* TODO: consider enable_irq_wake(irq) for CLKREQ# wake-up support */
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_vreg_clock_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+ struct device *dev = &pdev->dev;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
+ if (IS_ERR(res->vdda_phy))
+ return PTR_ERR(res->vdda_phy);
+
+ res->gdsc = dev_pm_domain_attach(dev, true);
+ if (IS_ERR(res->gdsc))
+ return PTR_ERR(res->gdsc);
+
+ res->ahb_clk = devm_clk_get(dev, "ahb_clk");
+ if (IS_ERR(res->ahb_clk))
+ return PTR_ERR(res->ahb_clk);
+
+ res->axi_m = devm_clk_get(dev, "master_axi_clk");
+ if (IS_ERR(res->axi_m))
+ return PTR_ERR(res->axi_m);
+
+ res->axi_s = devm_clk_get(dev, "slave_axi_clk");
+ if (IS_ERR(res->axi_s))
+ return PTR_ERR(res->axi_s);
+
+ res->aux_clk = devm_clk_get(dev, "aux_clk");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->ldo = devm_clk_get(dev, "ldo");
+ if (IS_ERR(res->ldo))
+ return PTR_ERR(res->ldo);
+
+ res->sleep_clk = devm_clk_get(dev, "sleep_clk");
+ if (IS_ERR(res->sleep_clk))
+ return PTR_ERR(res->sleep_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe_clk");
+ if (IS_ERR(res->pipe_clk))
+ return PTR_ERR(res->pipe_clk);
+
+ res->slave_q2a_axi_clk = devm_clk_get(dev, "slave_q2a_axi_clk");
+ if (IS_ERR(res->slave_q2a_axi_clk))
+ return PTR_ERR(res->slave_q2a_axi_clk);
+
+ res->core_reset = devm_reset_control_get_exclusive(dev, "core_reset");
+ if (IS_ERR(res->core_reset))
+ return PTR_ERR(res->core_reset);
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_gpio_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie_ep_resources *res = pcie_ep->res;
+
+ res->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_IN);
+ if (IS_ERR(res->reset))
+ return PTR_ERR(res->reset);
+
+ res->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_HIGH);
+ if (IS_ERR(res->wake))
+ return PTR_ERR(res->wake);
+
+ res->clkreq = devm_gpiod_get_optional(dev, "clkreq", GPIOD_OUT_LOW);
+ if (IS_ERR(res->clkreq))
+ return PTR_ERR(res->clkreq);
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = pcie_ep->pci;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
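+	/* No dedicated DBI2 (shadow) window; reuse the DBI mapping for it. */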
+ pci->dbi_base2 = pci->dbi_base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie_ep->elbi))
+ return PTR_ERR(pcie_ep->elbi);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iatu");
+ pci->atu_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+ pcie_ep->parf = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie_ep->parf))
+ return PTR_ERR(pcie_ep->parf);
+
+	/*
+	 * Mapping of the "mmio" region is currently disabled:
+	 * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+	 * pcie_ep->mmio = devm_ioremap_resource(dev, res);
+	 * if (IS_ERR(pcie_ep->mmio))
+	 *	return PTR_ERR(pcie_ep->mmio);
+	 */
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr");
+ pcie_ep->tcsr = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie_ep->tcsr))
+ return PTR_ERR(pcie_ep->tcsr);
+
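+	/* Note: 'res' still points at the "tcsr" resource at this point. */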
+ pci->ep.phys_base = res->start;
+ pci->ep.addr_size = resource_size(res);
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ int ret;
+
+ ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get io resources %d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_pcie_ep_get_vreg_clock_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get vreg, clocks %d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_pcie_ep_get_gpio_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get GPIO resources %d\n", ret);
+ return ret;
+ }
+
+	pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy");
+	if (IS_ERR(pcie_ep->phy))
+		return PTR_ERR(pcie_ep->phy);
+
+	return 0;
+}
+
+static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,pcie-ep", .data = &data_prairie_ep },
+ { }
+};
+
+static int qcom_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct pcie_ep_plat_data *data;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ struct qcom_pcie_ep *pcie_ep;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
+ if (!pcie_ep)
+ return -ENOMEM;
+
+ pcie_ep->res = devm_kzalloc(dev, sizeof(*pcie_ep->res), GFP_KERNEL);
+ if (!pcie_ep->res)
+ return -ENOMEM;
+
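+	/* Default to the "prairie" endpoint data when no OF match data is found. */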
+ id = of_match_node(qcom_pcie_ep_match, node);
+ if (id)
+ data = id->data;
+ else
+ data = &data_prairie_ep;
+
+ pcie_ep->data = data;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+ pci->dev = dev;
+ pci->ops = &pci_ops;
+ pci->ep.ops = &pci_ep_ops;
+ pcie_ep->pci = pci;
+
+ spin_lock_init(&pcie_ep->res_lock);
+ mutex_init(&pcie_ep->lock);
+
+ ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
+ if (ret) {
+		dev_err(dev, "failed to get resources: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable power and clocks */
+ ret = pcie_ep->data->ops->enable_resources(pcie_ep);
+ if (ret) {
+		dev_err(dev, "failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
+ /* Perform controller reset */
+ ret = pcie_ep->data->ops->core_reset(pcie_ep);
+ if (ret) {
+		dev_err(dev, "failed to reset the core: %d\n", ret);
+ return ret;
+ }
+
+ /* Initialize PHY */
+ ret = phy_init(pcie_ep->phy);
+ if (ret) {
+		dev_err(dev, "failed to initialize PHY: %d\n", ret);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret) {
+		dev_err(dev, "failed to power on PHY: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie_ep);
+
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret) {
+		dev_err(dev, "failed to initialize endpoint: %d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
+ if (ret) {
+		dev_err(dev, "failed to enable IRQ resources: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver qcom_pcie_ep_driver = {
+ .probe = qcom_pcie_ep_probe,
+ .driver = {
+ .name = "pcie-ep",
+ .suppress_bind_attrs = true,
+ .of_match_table = qcom_pcie_ep_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_ep_driver);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 9cdebe7f26cb..ca96245ca76e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -2030,6 +2030,101 @@ static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
};
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTERNAL_DIG_CORECLK_DIV, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MODE, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_DC_LEVEL_CTRL, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x56),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x22),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_VMODE_CTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_PI_QEC_CTRL, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_3, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_VGA_CAL_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x27),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B0, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B1, 0xf9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B3, 0xce),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B4, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B0, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B3, 0xcf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B4, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_PHPRE_CTRL, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_MARG_COARSE_CTRL2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG4, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG5, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_EQ_CONFIG1, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
+};
+
static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
@@ -3151,6 +3246,37 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
+ .tx_tbl = sdx55_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
+ .rx_tbl = sdx55_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
+ .pcs_tbl = sdx55_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
+ .pcs_misc_tbl = sdx55_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.type = PHY_TYPE_UFS,
.nlanes = 2,
@@ -3853,6 +3979,8 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
dev_err(qmp->dev, "phy initialization timed-out\n");
goto err_pcs_ready;
}
+
+	dev_dbg(qmp->dev, "PHY init complete: val: %d\n", val);
}
return 0;
@@ -4593,6 +4721,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm8250-qmp-modem-pcie-phy",
.data = &sm8250_qmp_gen3x2_pciephy_cfg,
}, {
+ .compatible = "qcom,sdx55-qmp-pcie-phy",
+ .data = &sdx55_qmp_pciephy_cfg,
+ }, {
.compatible = "qcom,sdx55-qmp-usb3-uni-phy",
.data = &sdx55_usb3_uniphy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 71ce3aa174ae..27de9749560a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -396,6 +396,7 @@
#define QSERDES_V3_DP_PHY_STATUS 0x0c0
/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_BG_TIMER 0x00c
#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
#define QSERDES_V4_COM_SSC_PER1 0x01c
#define QSERDES_V4_COM_SSC_PER2 0x020
@@ -403,7 +404,9 @@
#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028
#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030
#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN 0x044
#define QSERDES_V4_COM_CLK_ENABLE1 0x048
+#define QSERDES_V4_COM_SYS_CLK_CTRL 0x04c
#define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050
#define QSERDES_V4_COM_PLL_IVCO 0x058
#define QSERDES_V4_COM_CMN_IPTRIM 0x060
@@ -415,6 +418,7 @@
#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V4_COM_LOCK_CMP_CFG 0x0a8
#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
@@ -427,6 +431,10 @@
#define QSERDES_V4_COM_DIV_FRAC_START1_MODE1 0x0d8
#define QSERDES_V4_COM_DIV_FRAC_START2_MODE1 0x0dc
#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0
+#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0 0x0ec
+#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0 0x0f0
+#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1 0x0f4
+#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1 0x0f8
#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110
#define QSERDES_V4_COM_VCO_TUNE2_MODE0 0x114
@@ -436,13 +444,19 @@
#define QSERDES_V4_COM_CLK_SELECT 0x154
#define QSERDES_V4_COM_HSCLK_SEL 0x158
#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_CORECLK_DIV_MODE0 0x168
#define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V4_COM_CMN_CONFIG 0x17c
#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184
+#define QSERDES_V4_COM_CMN_MISC1 0x19c
+#define QSERDES_V4_COM_INTERNAL_DIG_CORECLK_DIV 0x1a0
+#define QSERDES_V4_COM_CMN_MODE 0x1a4
+#define QSERDES_V4_COM_VCO_DC_LEVEL_CTRL 0x1a8
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
-#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
/* Only for QMP V4 PHY - TX registers */
#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x34
@@ -459,6 +473,13 @@
#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
+/* Only for QMP V4_20 PHY - TX registers */
+#define QSERDES_V4_20_TX_LANE_MODE_1 0x88
+#define QSERDES_V4_20_TX_LANE_MODE_2 0x8c
+#define QSERDES_V4_20_TX_LANE_MODE_3 0x90
+#define QSERDES_V4_20_TX_VMODE_CTRL1 0xc4
+#define QSERDES_V4_20_TX_PI_QEC_CTRL 0xe0
+
/* Only for QMP V4 PHY - RX registers */
#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
@@ -514,6 +535,33 @@
#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
#define QSERDES_V4_RX_VTH_CODE 0x1c4
+/* Only for QMP V4_20 PHY - RX registers */
+#define QSERDES_V4_20_RX_FO_GAIN_RATE2 0x008
+#define QSERDES_V4_20_RX_UCDR_PI_CONTROLS 0x058
+#define QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE 0x0ac
+#define QSERDES_V4_20_RX_DFE_3 0x110
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE1 0x134
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE2 0x138
+#define QSERDES_V4_20_RX_VGA_CAL_CNTRL2 0x150
+#define QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x178
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1 0x1c8
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2 0x1cc
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3 0x1d0
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4 0x1d4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B0 0x1d8
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B1 0x1dc
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B2 0x1e0
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B3 0x1e4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B4 0x1e8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B0 0x1ec
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B1 0x1f0
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B2 0x1f4
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B3 0x1f8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B4 0x1fc
+#define QSERDES_V4_20_RX_PHPRE_CTRL 0x200
+#define QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x20c
+#define QSERDES_V4_20_RX_MARG_COARSE_CTRL2 0x23c
+
/* Only for QMP V4 PHY - UFS PCS registers */
#define QPHY_V4_PCS_UFS_PHY_START 0x000
#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
@@ -799,6 +847,12 @@
#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x354
#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x358
+/* Only for QMP V4_20 PHY - USB/PCIe PCS registers */
+#define QPHY_V4_20_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V4_20_PCS_EQ_CONFIG2 0x1d8
+#define QPHY_V4_20_PCS_EQ_CONFIG4 0x1e0
+#define QPHY_V4_20_PCS_EQ_CONFIG5 0x1e4
+
/* Only for QMP V4 PHY - UNI has 0x300 offset for PCS_USB3 regs */
#define QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL 0x618
#define QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2 0x638
@@ -824,6 +878,14 @@
#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE 0xbc
#define QPHY_V4_PCS_PCIE_PRESET_P10_POST 0xe0
+#define QPHY_V4_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME 0x0f0
+#define QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME 0x0f4
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x824
+#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
+
/* Only for QMP V5 PHY - QSERDES COM registers */
#define QSERDES_V5_COM_PLL_IVCO 0x058
#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index e635454d6170..292141877260 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -785,6 +785,22 @@ static const struct adsp_data wcss_resource_init = {
.ssctl_id = 0x12,
};
+static const struct adsp_data sdx55_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x22,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
@@ -797,6 +813,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
+ { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},