aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinaro CI <ci_notify@linaro.org>2021-03-08 16:08:06 +0000
committerLinaro CI <ci_notify@linaro.org>2021-03-08 16:08:06 +0000
commitfe0490191e9408c042401c2409263e2fa7b9e7c5 (patch)
treebb23cd8a1a8fe828b54248cfdd25fad7aa8db3d6
parent03061b9fc4f67ff047ef1d3d5e69886b73e87561 (diff)
parent53f84cd0b2c61b6bb3b83e840094a6fde87c05be (diff)
Merge remote-tracking branch 'gsi/tracking-qcomlt-gsi' into integration-linux-qcomlt
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi57
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c246
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c55
-rw-r--r--drivers/spi/spi-geni-qcom.c392
-rw-r--r--include/linux/qcom-geni-se.h20
6 files changed, 747 insertions, 27 deletions
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index c4ac6f5dc008..068b351acd9e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -420,6 +420,10 @@
<GCC_LPASS_SWAY_CLK>;
};
+&gpi_dma0 {
+ status = "okay";
+};
+
&gpu {
zap-shader {
memory-region = <&gpu_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 454f794af547..5e88f204d2eb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/clock/qcom,lpass-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,videocc-sdm845.h>
+#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sdm845.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -1114,6 +1115,29 @@
};
};
+ gpi_dma0: dma-controller@800000 {
+ #dma-cells = <3>;
+ compatible = "qcom,sdm845-gpi-dma";
+ reg = <0 0x00800000 0 0x60000>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <13>;
+ dma-channel-mask = <0xfa>;
+ iommus = <&apps_smmu 0x0016 0x0>;
+ status = "disabled";
+ };
+
qupv3_id_0: geniqup@8c0000 {
compatible = "qcom,geni-se-qup";
reg = <0 0x008c0000 0 0x6000>;
@@ -1160,6 +1184,9 @@
interconnects = <&aggre1_noc MASTER_QUP_1 0 &config_noc SLAVE_BLSP_1 0>,
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_BLSP_1 0>;
interconnect-names = "qup-core", "qup-config";
+ dmas = <&gpi_dma0 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -1533,6 +1560,29 @@
};
};
+ gpi_dma1: dma-controller@a00000 {
+ #dma-cells = <3>;
+ compatible = "qcom,sdm845-gpi-dma";
+ reg = <0 0x00a00000 0 0x60000>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <13>;
+ dma-channel-mask = <0xfa>;
+ iommus = <&apps_smmu 0x06d6 0x0>;
+ status = "disabled";
+ };
+
qupv3_id_1: geniqup@ac0000 {
compatible = "qcom,geni-se-qup";
reg = <0 0x00ac0000 0 0x6000>;
@@ -2574,6 +2624,13 @@
"gpio2", "gpio3";
function = "qup0";
};
+
+ config {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ drive-strength = <6>;
+ bias-disable;
+ };
};
qup_spi1_default: qup-spi1-default {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 214b4c913a13..8b733498fa8b 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -12,7 +12,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/dmaengine.h>
#include <linux/qcom-geni-se.h>
+#include <linux/dma/qcom-gpi-dma.h>
#include <linux/spinlock.h>
#define SE_I2C_TX_TRANS_LEN 0x26c
@@ -48,6 +50,8 @@
#define LOW_COUNTER_SHFT 10
#define CYCLE_COUNTER_MSK GENMASK(9, 0)
+#define I2C_PACK_EN (BIT(0) | BIT(1))
+
enum geni_i2c_err_code {
GP_IRQ0,
NACK,
@@ -72,6 +76,12 @@ enum geni_i2c_err_code {
#define XFER_TIMEOUT HZ
#define RST_TIMEOUT HZ
+enum i2c_se_mode {
+ UNINITIALIZED,
+ FIFO_SE_DMA,
+ GSI_ONLY,
+};
+
struct geni_i2c_dev {
struct geni_se se;
u32 tx_wm;
@@ -89,6 +99,17 @@ struct geni_i2c_dev {
void *dma_buf;
size_t xfer_len;
dma_addr_t dma_addr;
+ struct dma_chan *tx_c;
+ struct dma_chan *rx_c;
+ dma_cookie_t rx_cookie, tx_cookie;
+ dma_addr_t tx_ph;
+ dma_addr_t rx_ph;
+ int cfg_sent;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct dma_async_tx_descriptor *rx_desc;
+ enum i2c_se_mode se_mode;
+ bool cmd_done;
+ bool is_shared;
};
struct geni_i2c_err_log {
@@ -456,6 +477,183 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
return gi2c->err;
}
+static void i2c_gsi_cb_result(void *cb, const struct dmaengine_result *result)
+{
+ struct geni_i2c_dev *gi2c = cb;
+
+ if (result->result != DMA_TRANS_NOERROR) {
+ dev_err(gi2c->se.dev, "DMA txn failed:%d\n", result->result);
+ return;
+ }
+
+ if (result->residue)
+ dev_dbg(gi2c->se.dev, "DMA xfer has pending: %d\n", result->residue);
+
+ complete(&gi2c->done);
+}
+
+static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
+ struct dma_slave_config config;
+ struct gpi_i2c_config peripheral;
+ int i, ret = 0, timeout = 0;
+
+ memset(&config, 0, sizeof(config));
+ memset(&peripheral, 0, sizeof(peripheral));
+ config.peripheral_config = &peripheral;
+ config.peripheral_size = sizeof(peripheral);
+
+ if (!gi2c->tx_c) {
+ gi2c->tx_c = dma_request_slave_channel(gi2c->se.dev, "tx");
+ if (!gi2c->tx_c) {
+ dev_err(gi2c->se.dev, "tx dma_request_slave_channel fail\n");
+ ret = -EIO;
+ goto geni_i2c_gsi_xfer_out;
+ }
+ }
+
+ if (!gi2c->rx_c) {
+ gi2c->rx_c = dma_request_slave_channel(gi2c->se.dev, "rx");
+ if (!gi2c->rx_c) {
+ dev_err(gi2c->se.dev, "rx dma_request_slave_channel fail\n");
+ ret = -EIO;
+ goto geni_i2c_gsi_xfer_out;
+ }
+ }
+
+ if (!gi2c->cfg_sent) {
+ const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
+
+ peripheral.pack_enable = I2C_PACK_EN;
+ peripheral.cycle_count = itr->t_cycle_cnt;
+ peripheral.high_count = itr->t_high_cnt;
+ peripheral.low_count = itr->t_low_cnt;
+ peripheral.clk_div = itr->clk_div;
+ gi2c->cfg_sent = true;
+ peripheral.set_config = true;
+ }
+
+ peripheral.multi_msg = false;
+ for (i = 0; i < num; i++) {
+ struct device *rx_dev = gi2c->se.wrapper->dev;
+ struct device *tx_dev = gi2c->se.wrapper->dev;
+ int stretch = (i < (num - 1));
+ u8 *dma_buf = NULL;
+ unsigned int flags;
+
+ gi2c->cur = &msgs[i];
+
+ peripheral.addr = msgs[i].addr;
+ peripheral.stretch = stretch;
+ if (msgs[i].flags & I2C_M_RD)
+ peripheral.op = I2C_READ;
+ else
+ peripheral.op = I2C_WRITE;
+
+ dma_buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
+ if (!dma_buf) {
+ ret = -ENOMEM;
+ goto geni_i2c_gsi_xfer_out;
+ }
+
+ if (msgs[i].flags & I2C_M_RD) {
+ gi2c->rx_ph = dma_map_single(rx_dev, dma_buf,
+ msgs[i].len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_dev, gi2c->rx_ph)) {
+ dev_err(gi2c->se.dev, "dma_map_single for rx failed\n");
+ ret = -ENOMEM;
+ i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], false);
+ goto geni_i2c_gsi_xfer_out;
+ }
+
+ peripheral.op = I2C_READ;
+ peripheral.stretch = stretch;
+ ret = dmaengine_slave_config(gi2c->rx_c, &config);
+ if (ret) {
+ dev_err(gi2c->se.dev, "rx dma config error:%d\n", ret);
+ goto geni_i2c_gsi_xfer_out;
+ }
+ peripheral.set_config = false;
+ peripheral.multi_msg = true;
+ peripheral.rx_len = msgs[i].len;
+
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ gi2c->rx_desc = dmaengine_prep_slave_single(gi2c->rx_c, gi2c->rx_ph,
+ msgs[i].len,
+ DMA_DEV_TO_MEM, flags);
+ if (!gi2c->rx_desc) {
+ dev_err(gi2c->se.dev, "prep_slave_sg for rx failed\n");
+ gi2c->err = -EIO;
+ goto geni_i2c_err_prep_sg;
+ }
+
+ gi2c->rx_desc->callback_result = i2c_gsi_cb_result;
+ gi2c->rx_desc->callback_param = gi2c;
+
+ /* Issue RX */
+ gi2c->rx_cookie = dmaengine_submit(gi2c->rx_desc);
+ dma_async_issue_pending(gi2c->rx_c);
+ }
+
+ dev_dbg(gi2c->se.dev, "msg[%d].len:%d W\n", i, gi2c->cur->len);
+ gi2c->tx_ph = dma_map_single(tx_dev, dma_buf, msgs[i].len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_dev, gi2c->tx_ph)) {
+ dev_err(gi2c->se.dev, "dma_map_single for tx failed\n");
+ ret = -ENOMEM;
+ i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], false);
+ goto geni_i2c_gsi_xfer_out;
+ }
+
+ peripheral.stretch = stretch;
+ peripheral.op = I2C_WRITE;
+ ret = dmaengine_slave_config(gi2c->tx_c, &config);
+ if (ret) {
+ dev_err(gi2c->se.dev, "tx dma config error:%d\n", ret);
+ goto geni_i2c_gsi_xfer_out;
+ }
+ peripheral.set_config = false;
+ peripheral.multi_msg = true;
+ gi2c->tx_desc = dmaengine_prep_slave_single(gi2c->tx_c, gi2c->tx_ph, msgs[i].len,
+ DMA_MEM_TO_DEV,
+ (DMA_PREP_INTERRUPT | DMA_CTRL_ACK));
+ if (!gi2c->tx_desc) {
+ dev_err(gi2c->se.dev, "prep_slave_sg for tx failed\n");
+ gi2c->err = -ENOMEM;
+ goto geni_i2c_err_prep_sg;
+ }
+ gi2c->tx_desc->callback_result = i2c_gsi_cb_result;
+ gi2c->tx_desc->callback_param = gi2c;
+
+ /* Issue TX */
+ gi2c->tx_cookie = dmaengine_submit(gi2c->tx_desc);
+ dma_async_issue_pending(gi2c->tx_c);
+
+ timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!timeout) {
+ dev_err(gi2c->se.dev, "I2C timeout gsi flags:%d addr:0x%x\n",
+ gi2c->cur->flags, gi2c->cur->addr);
+ gi2c->err = -ETIMEDOUT;
+ }
+geni_i2c_err_prep_sg:
+ if (gi2c->err) {
+ dmaengine_terminate_all(gi2c->tx_c);
+ gi2c->cfg_sent = 0;
+ }
+ if (msgs[i].flags & I2C_M_RD)
+ dma_unmap_single(rx_dev, gi2c->rx_ph, msgs[i].len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_single(tx_dev, gi2c->tx_ph, msgs[i].len, DMA_TO_DEVICE);
+ i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], !gi2c->err);
+ if (gi2c->err)
+ goto geni_i2c_gsi_xfer_out;
+ }
+
+geni_i2c_gsi_xfer_out:
+ if (!ret && gi2c->err)
+ ret = gi2c->err;
+ return ret;
+}
+
static int geni_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg msgs[],
int num)
@@ -475,6 +673,15 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
}
qcom_geni_i2c_conf(gi2c);
+
+ if (gi2c->se_mode == GSI_ONLY) {
+ ret = geni_i2c_gsi_xfer(adap, msgs, num);
+ goto geni_i2c_txn_ret;
+ } else {
+ /* Don't set shared flag in non-GSI mode */
+ gi2c->is_shared = false;
+ }
+
for (i = 0; i < num; i++) {
u32 m_param = i < (num - 1) ? STOP_STRETCH : 0;
@@ -489,6 +696,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
if (ret)
break;
}
+geni_i2c_txn_ret:
if (ret == 0)
ret = num;
@@ -655,7 +863,8 @@ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
int ret;
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
- disable_irq(gi2c->irq);
+ if (gi2c->se_mode == FIFO_SE_DMA)
+ disable_irq(gi2c->irq);
ret = geni_se_resources_off(&gi2c->se);
if (ret) {
enable_irq(gi2c->irq);
@@ -680,8 +889,41 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
ret = geni_se_resources_on(&gi2c->se);
if (ret)
return ret;
+ if (gi2c->se_mode == UNINITIALIZED) {
+ int proto = geni_se_read_proto(&gi2c->se);
+ u32 se_mode;
+
+ if (unlikely(proto != GENI_SE_I2C)) {
+ dev_err(gi2c->se.dev, "Invalid proto %d\n", proto);
+ geni_se_resources_off(&gi2c->se);
+ return -ENXIO;
+ }
+
+ se_mode = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) &
+ FIFO_IF_DISABLE;
+ if (se_mode) {
+ gi2c->se_mode = GSI_ONLY;
+ geni_se_select_mode(&gi2c->se, GENI_GPI_DMA);
+ dev_dbg(gi2c->se.dev, "i2c GSI mode\n");
+ } else {
+ int gi2c_tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
+
+ gi2c->se_mode = FIFO_SE_DMA;
+ gi2c->tx_wm = gi2c_tx_depth - 1;
+ geni_se_init(&gi2c->se, gi2c->tx_wm, gi2c_tx_depth);
+ geni_se_config_packing(&gi2c->se, BITS_PER_BYTE,
+ PACKING_BYTES_PW, true, true, true);
+ qcom_geni_i2c_conf(gi2c);
+ dev_dbg(gi2c->se.dev,
+ "i2c fifo/se-dma mode. fifo depth:%d\n", gi2c_tx_depth);
+ }
+ dev_dbg(gi2c->se.dev, "i2c-%d: %s\n",
+ gi2c->adap.nr, dev_name(gi2c->se.dev));
+ }
+
+ if (gi2c->se_mode == FIFO_SE_DMA)
+ enable_irq(gi2c->irq);
- enable_irq(gi2c->irq);
gi2c->suspended = 0;
return 0;
}
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index f42954e2c98e..db44dc32e049 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -79,21 +79,6 @@
*/
#define MAX_CLK_PERF_LEVEL 32
-#define NUM_AHB_CLKS 2
-
-/**
- * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
- * @dev: Device pointer of the QUP wrapper core
- * @base: Base address of this instance of QUP wrapper core
- * @ahb_clks: Handle to the primary & secondary AHB clocks
- * @to_core: Core ICC path
- */
-struct geni_wrapper {
- struct device *dev;
- void __iomem *base;
- struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
- struct geni_icc_path to_core;
-};
static const char * const icc_path_names[] = {"qup-core", "qup-config",
"qup-memory"};
@@ -108,7 +93,6 @@ static struct geni_wrapper *earlycon_wrapper;
#define GENI_OUTPUT_CTRL 0x24
#define GENI_CGC_CTRL 0x28
#define GENI_CLK_CTRL_RO 0x60
-#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_S_REVISION_RO 0x6c
#define SE_GENI_BYTE_GRAN 0x254
#define SE_GENI_TX_PACKING_CFG0 0x260
@@ -326,6 +310,39 @@ static void geni_se_select_dma_mode(struct geni_se *se)
writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
}
+static int geni_se_select_gpi_mode(struct geni_se *se)
+{
+ unsigned int geni_dma_mode = 0;
+ unsigned int gpi_event_en = 0;
+ unsigned int common_geni_m_irq_en = 0;
+ unsigned int common_geni_s_irq_en = 0;
+
+ common_geni_m_irq_en = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ common_geni_s_irq_en = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ common_geni_m_irq_en &=
+ ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
+ M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ common_geni_s_irq_en &= ~S_CMD_DONE_EN;
+ geni_dma_mode = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+ gpi_event_en = readl_relaxed(se->base + SE_GSI_EVENT_EN);
+
+ geni_dma_mode |= GENI_DMA_MODE_EN;
+ gpi_event_en |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
+ GENI_M_EVENT_EN | GENI_S_EVENT_EN);
+
+ writel_relaxed(0, se->base + SE_IRQ_EN);
+ writel_relaxed(common_geni_s_irq_en, se->base + SE_GENI_S_IRQ_EN);
+ writel_relaxed(common_geni_m_irq_en, se->base + SE_GENI_M_IRQ_EN);
+ writel_relaxed(0xFFFFFFFF, se->base + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(0xFFFFFFFF, se->base + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(0xFFFFFFFF, se->base + SE_DMA_TX_IRQ_CLR);
+ writel_relaxed(0xFFFFFFFF, se->base + SE_DMA_RX_IRQ_CLR);
+ writel_relaxed(geni_dma_mode, se->base + SE_GENI_DMA_MODE_EN);
+ writel_relaxed(gpi_event_en, se->base + SE_GSI_EVENT_EN);
+
+ return 0;
+}
+
/**
* geni_se_select_mode() - Select the serial engine transfer mode
* @se: Pointer to the concerned serial engine.
@@ -333,7 +350,8 @@ static void geni_se_select_dma_mode(struct geni_se *se)
*/
void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
{
- WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA);
+ WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA &&
+ mode != GENI_GPI_DMA);
switch (mode) {
case GENI_SE_FIFO:
@@ -342,6 +360,9 @@ void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
case GENI_SE_DMA:
geni_se_select_dma_mode(se);
break;
+ case GENI_GPI_DMA:
+ geni_se_select_gpi_mode(se);
+ break;
case GENI_SE_INVALID:
default:
break;
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 881f645661cc..660cfaaf3d01 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -2,6 +2,8 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
@@ -10,6 +12,7 @@
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
+#include <linux/dma/qcom-gpi-dma.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
@@ -63,6 +66,35 @@
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
+#define GSI_LOOPBACK_EN (BIT(0))
+#define GSI_CS_TOGGLE (BIT(3))
+#define GSI_CPHA (BIT(4))
+#define GSI_CPOL (BIT(5))
+
+#define MAX_TX_SG (3)
+#define NUM_SPI_XFER (8)
+#define SPI_XFER_TIMEOUT_MS (250)
+
+struct gsi_desc_cb {
+ struct spi_geni_master *mas;
+ struct spi_transfer *xfer;
+};
+
+struct spi_geni_gsi {
+ dma_cookie_t tx_cookie;
+ dma_cookie_t rx_cookie;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct dma_async_tx_descriptor *rx_desc;
+ struct gsi_desc_cb desc_cb;
+};
+
+enum spi_m_cmd_opcode {
+ CMD_NONE,
+ CMD_XFER,
+ CMD_CS,
+ CMD_CANCEL,
+};
+
struct spi_geni_master {
struct geni_se se;
struct device *dev;
@@ -79,11 +111,22 @@ struct spi_geni_master {
struct completion cs_done;
struct completion cancel_done;
struct completion abort_done;
+ struct completion xfer_done;
unsigned int oversampling;
spinlock_t lock;
int irq;
bool cs_flag;
bool abort_failed;
+ struct spi_geni_gsi *gsi;
+ struct dma_chan *tx;
+ struct dma_chan *rx;
+ struct completion tx_cb;
+ struct completion rx_cb;
+ bool qn_err;
+ int cur_xfer_mode;
+ int num_tx_eot;
+ int num_rx_eot;
+ int num_xfers;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -330,11 +373,239 @@ static int setup_fifo_params(struct spi_device *spi_slv,
return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}
+static int get_xfer_mode(struct spi_master *spi)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ int mode = GENI_SE_FIFO;
+ int fifo_disable;
+ bool dma_chan_valid;
+
+ fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
+ dma_chan_valid = !(IS_ERR_OR_NULL(mas->tx) || IS_ERR_OR_NULL(mas->rx));
+
+ /*
+ * If FIFO Interface is disabled and there are no DMA channels then we
+ * can't do this transfer.
+ * If FIFO interface is disabled, we can do GSI only,
+ * else pick FIFO mode.
+ */
+ if (fifo_disable && !dma_chan_valid) {
+ dev_err(mas->dev, "Fifo and dma mode disabled!! can't xfer\n");
+ mode = -EINVAL;
+ } else if (fifo_disable) {
+ mode = GENI_GPI_DMA;
+ } else {
+ mode = GENI_SE_FIFO;
+ }
+
+ return mode;
+}
+
+static void
+spi_gsi_callback_result(void *cb, const struct dmaengine_result *result, bool tx)
+{
+ struct gsi_desc_cb *gsi = cb;
+
+ if (result->result != DMA_TRANS_NOERROR) {
+ dev_err(gsi->mas->dev, "%s: DMA %s txn failed\n", __func__, tx ? "tx" : "rx");
+ return;
+ }
+
+ if (!result->residue) {
+ dev_dbg(gsi->mas->dev, "%s\n", __func__);
+ if (tx)
+ complete(&gsi->mas->tx_cb);
+ else
+ complete(&gsi->mas->rx_cb);
+ } else {
+ dev_err(gsi->mas->dev, "DMA xfer has pending: %d\n", result->residue);
+ }
+}
+
+static void
+spi_gsi_rx_callback_result(void *cb, const struct dmaengine_result *result)
+{
+ spi_gsi_callback_result(cb, result, false);
+}
+
+static void
+spi_gsi_tx_callback_result(void *cb, const struct dmaengine_result *result)
+{
+ spi_gsi_callback_result(cb, result, true);
+}
+
+static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
+ struct spi_device *spi_slv, struct spi_master *spi)
+{
+ int ret = 0;
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct spi_geni_gsi *gsi;
+ struct dma_slave_config config;
+ struct gpi_spi_config peripheral;
+
+ memset(&config, 0, sizeof(config));
+ memset(&peripheral, 0, sizeof(peripheral));
+ config.peripheral_config = &peripheral;
+ config.peripheral_size = sizeof(peripheral);
+
+ if (xfer->bits_per_word != mas->cur_bits_per_word ||
+ xfer->speed_hz != mas->cur_speed_hz) {
+ mas->cur_bits_per_word = xfer->bits_per_word;
+ mas->cur_speed_hz = xfer->speed_hz;
+ peripheral.set_config = true;
+ }
+
+ if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
+ peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
+ } else {
+ int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;
+
+ peripheral.rx_len = (xfer->len / bytes_per_word);
+ }
+
+ if (xfer->tx_buf && xfer->rx_buf) {
+ peripheral.cmd = SPI_DUPLEX;
+ } else if (xfer->tx_buf) {
+ peripheral.cmd = SPI_TX;
+ peripheral.rx_len = 0;
+ } else if (xfer->rx_buf) {
+ peripheral.cmd = SPI_RX;
+ }
+
+ peripheral.cs = spi_slv->chip_select;
+
+ if (spi_slv->mode & SPI_LOOP)
+ peripheral.loopback_en = true;
+ if (spi_slv->mode & SPI_CPOL)
+ peripheral.clock_pol_high = true;
+ if (spi_slv->mode & SPI_CPHA)
+ peripheral.data_pol_high = true;
+ peripheral.pack_en = true;
+ peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;
+ ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
+ &peripheral.clk_src, &peripheral.clk_div);
+ if (ret) {
+ dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret);
+ return ret;
+ }
+
+ if (!xfer->cs_change) {
+ if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
+ peripheral.fragmentation = FRAGMENTATION;
+ }
+
+ gsi = &mas->gsi[mas->num_xfers];
+ gsi->desc_cb.mas = mas;
+ gsi->desc_cb.xfer = xfer;
+ if (peripheral.cmd & SPI_RX) {
+ dmaengine_slave_config(mas->rx, &config);
+ gsi->rx_desc = dmaengine_prep_slave_single(mas->rx, xfer->rx_dma,
+ xfer->len, DMA_DEV_TO_MEM, flags);
+ if (IS_ERR_OR_NULL(gsi->rx_desc)) {
+ dev_err(mas->dev, "Err setting up rx desc\n");
+ return -EIO;
+ }
+ gsi->rx_desc->callback_result = spi_gsi_rx_callback_result;
+ gsi->rx_desc->callback_param = &gsi->desc_cb;
+ mas->num_rx_eot++;
+ }
+
+ if (peripheral.cmd & SPI_TX_ONLY)
+ mas->num_tx_eot++;
+
+ dmaengine_slave_config(mas->tx, &config);
+ gsi->tx_desc = dmaengine_prep_slave_single(mas->tx, xfer->tx_dma,
+ xfer->len, DMA_MEM_TO_DEV, flags);
+
+ if (IS_ERR_OR_NULL(gsi->tx_desc)) {
+ dev_err(mas->dev, "Err setting up tx desc\n");
+ return -EIO;
+ }
+ gsi->tx_desc->callback_result = spi_gsi_tx_callback_result;
+ gsi->tx_desc->callback_param = &gsi->desc_cb;
+ if (peripheral.cmd & SPI_RX)
+ gsi->rx_cookie = dmaengine_submit(gsi->rx_desc);
+ gsi->tx_cookie = dmaengine_submit(gsi->tx_desc);
+ if (peripheral.cmd & SPI_RX)
+ dma_async_issue_pending(mas->rx);
+ dma_async_issue_pending(mas->tx);
+ mas->num_xfers++;
+ return ret;
+}
+
+static int spi_geni_map_buf(struct spi_geni_master *mas, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *gsi_dev = mas->dev->parent;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(gsi_dev, xfer->rx_buf,
+ xfer->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(gsi_dev, xfer->rx_dma)) {
+ dev_err(mas->dev, "Err mapping buf\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (xfer->tx_buf) {
+ xfer->tx_dma = dma_map_single(gsi_dev, (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(gsi_dev, xfer->tx_dma)) {
+ dev_err(mas->dev, "Err mapping buf\n");
+ if (xfer->rx_buf)
+ dma_unmap_single(gsi_dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
+ }
+ };
+
+ return 0;
+}
+
+static void spi_geni_unmap_buf(struct spi_geni_master *mas, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *gsi_dev = mas->dev->parent;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->rx_buf)
+ dma_unmap_single(gsi_dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
+ if (xfer->tx_buf)
+ dma_unmap_single(gsi_dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
+ };
+}
+
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+
+ mas->cur_xfer_mode = get_xfer_mode(spi);
+
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ reinit_completion(&mas->xfer_done);
+ ret = setup_fifo_params(spi_msg->spi, spi);
+ if (ret)
+ dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+
+ } else if (mas->cur_xfer_mode == GENI_GPI_DMA) {
+ mas->num_tx_eot = 0;
+ mas->num_rx_eot = 0;
+ mas->num_xfers = 0;
+ reinit_completion(&mas->tx_cb);
+ reinit_completion(&mas->rx_cb);
+ memset(mas->gsi, 0, (sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
+ geni_se_select_mode(se, GENI_GPI_DMA);
+ ret = spi_geni_map_buf(mas, spi_msg);
+
+ } else {
+ dev_err(mas->dev, "%s: Couldn't select mode %d", __func__, mas->cur_xfer_mode);
+ ret = -EINVAL;
+ }
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
@@ -345,19 +616,32 @@ static int spi_geni_prepare_message(struct spi_master *spi,
return ret;
}
+static int spi_geni_unprepare_message(struct spi_master *spi_mas, struct spi_message *spi_msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
+
+ mas->cur_speed_hz = 0;
+ mas->cur_bits_per_word = 0;
+ if (mas->cur_xfer_mode == GENI_GPI_DMA)
+ spi_geni_unmap_buf(mas, spi_msg);
+ return 0;
+}
+
static int spi_geni_init(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver;
u32 spi_tx_cfg;
+ size_t gsi_sz;
+ int ret = 0;
pm_runtime_get_sync(mas->dev);
proto = geni_se_read_proto(se);
if (proto != GENI_SE_SPI) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
- pm_runtime_put(mas->dev);
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_pm;
}
mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
@@ -387,8 +671,34 @@ static int spi_geni_init(struct spi_geni_master *mas)
spi_tx_cfg &= ~CS_TOGGLE;
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
+ mas->tx = dma_request_slave_channel(mas->dev, "tx");
+ if (IS_ERR_OR_NULL(mas->tx)) {
+ dev_err(mas->dev, "Failed to get tx DMA ch %ld", PTR_ERR(mas->tx));
+ ret = PTR_ERR(mas->tx);
+ goto out_pm;
+ } else {
+ mas->rx = dma_request_slave_channel(mas->dev, "rx");
+ if (IS_ERR_OR_NULL(mas->rx)) {
+ dev_err(mas->dev, "Failed to get rx DMA ch %ld", PTR_ERR(mas->rx));
+ dma_release_channel(mas->tx);
+ ret = PTR_ERR(mas->rx);
+ goto out_pm;
+ }
+
+ gsi_sz = sizeof(struct spi_geni_gsi) * NUM_SPI_XFER;
+ mas->gsi = devm_kzalloc(mas->dev, gsi_sz, GFP_KERNEL);
+ if (!mas->gsi) {
+ dma_release_channel(mas->tx);
+ dma_release_channel(mas->rx);
+ mas->tx = NULL;
+ mas->rx = NULL;
+ ret = -ENOMEM;
+ goto out_pm;
+ }
+ }
+
+out_pm:
pm_runtime_put(mas->dev);
- return 0;
+ return ret;
}
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
@@ -493,6 +803,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
{
u32 m_cmd = 0;
u32 len;
+ u32 m_param = 0;
struct geni_se *se = &mas->se;
int ret;
@@ -530,6 +841,11 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
len &= TRANS_LEN_MSK;
+ if (!xfer->cs_change) {
+ if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
+ m_param |= FRAGMENTATION;
+ }
+
mas->cur_xfer = xfer;
if (xfer->tx_buf) {
m_cmd |= SPI_TX_ONLY;
@@ -548,7 +864,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
* interrupt could come in at any time now.
*/
spin_lock_irq(&mas->lock);
- geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
+ geni_se_setup_m_cmd(se, m_cmd, m_param);
/*
* TX_WATERMARK_REG should be set after SPI configuration and
@@ -567,16 +883,55 @@ static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ unsigned long timeout, jiffies;
int ret = 0, i;
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
- return 0;
+ return ret;
+
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ setup_fifo_xfer(xfer, mas, slv->mode, spi);
+ } else {
+ setup_gsi_xfer(xfer, mas, slv, spi);
+ if (mas->num_xfers >= NUM_SPI_XFER ||
+ (list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))) {
+ for (i = 0 ; i < mas->num_tx_eot; i++) {
+ jiffies = msecs_to_jiffies(SPI_XFER_TIMEOUT_MS);
+ timeout = wait_for_completion_timeout(&mas->tx_cb, jiffies);
+ if (timeout <= 0) {
+ dev_err(mas->dev, "Tx[%d] timeout%lu\n", i, timeout);
+ ret = -ETIMEDOUT;
+ goto err_gsi_geni_transfer_one;
+ }
+ spi_finalize_current_transfer(spi);
+ }
+ for (i = 0 ; i < mas->num_rx_eot; i++) {
+ jiffies = msecs_to_jiffies(SPI_XFER_TIMEOUT_MS);
+ timeout = wait_for_completion_timeout(&mas->rx_cb, jiffies);
+ if (timeout <= 0) {
+ dev_err(mas->dev, "Rx[%d] timeout%lu\n", i, timeout);
+ ret = -ETIMEDOUT;
+ goto err_gsi_geni_transfer_one;
+ }
+ spi_finalize_current_transfer(spi);
+ }
+ if (mas->qn_err) {
+ ret = -EIO;
+ mas->qn_err = false;
+ goto err_gsi_geni_transfer_one;
+ }
+ }
+ }
- setup_fifo_xfer(xfer, mas, slv->mode, spi);
- return 1;
+ return ret;
+
+err_gsi_geni_transfer_one:
+ dmaengine_terminate_all(mas->tx);
+ return ret;
}
static irqreturn_t geni_spi_isr(int irq, void *data)
@@ -671,6 +1026,15 @@ static int spi_geni_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "could not set DMA mask\n");
+ return ret;
+ }
+ }
+
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -708,15 +1072,18 @@ static int spi_geni_probe(struct platform_device *pdev)
spi->num_chipselect = 4;
spi->max_speed_hz = 50000000;
spi->prepare_message = spi_geni_prepare_message;
+ spi->unprepare_message = spi_geni_unprepare_message;
spi->transfer_one = spi_geni_transfer_one;
spi->auto_runtime_pm = true;
spi->handle_err = handle_fifo_timeout;
- spi->set_cs = spi_geni_set_cs;
spi->use_gpio_descriptors = true;
init_completion(&mas->cs_done);
init_completion(&mas->cancel_done);
init_completion(&mas->abort_done);
+ init_completion(&mas->xfer_done);
+ init_completion(&mas->tx_cb);
+ init_completion(&mas->rx_cb);
spin_lock_init(&mas->lock);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
@@ -737,6 +1104,15 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
goto spi_geni_probe_runtime_disable;
+ /*
+ * query the mode supported and set_cs for fifo mode only
+ * for dma (gsi) mode, the gsi will set cs based on params passed in
+ * TRE
+ */
+ mas->cur_xfer_mode = get_xfer_mode(spi);
+ if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ spi->set_cs = spi_geni_set_cs;
+
ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
goto spi_geni_probe_runtime_disable;
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index ec2ad4b0fe14..12003a6cb133 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -12,6 +12,7 @@
enum geni_se_xfer_mode {
GENI_SE_INVALID,
GENI_SE_FIFO,
+ GENI_GPI_DMA,
GENI_SE_DMA,
};
@@ -38,6 +39,21 @@ struct geni_icc_path {
unsigned int avg_bw;
};
+#define NUM_AHB_CLKS 2
+
+/**
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * @dev: Device pointer of the QUP wrapper core
+ * @base: Base address of this instance of QUP wrapper core
+ * @ahb_clks: Handle to the primary & secondary AHB clocks
+ * @to_core: Core ICC path
+ */
+struct geni_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+ struct geni_icc_path to_core;
+};
+
/**
* struct geni_se - GENI Serial Engine
* @base: Base Address of the Serial Engine's register block
@@ -65,6 +81,7 @@ struct geni_se {
#define SE_GENI_STATUS 0x40
#define GENI_SER_M_CLK_CFG 0x48
#define GENI_SER_S_CLK_CFG 0x4c
+#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_REVISION_RO 0x68
#define SE_GENI_CLK_SEL 0x7c
#define SE_GENI_DMA_MODE_EN 0x258
@@ -107,6 +124,9 @@ struct geni_se {
#define CLK_DIV_MSK GENMASK(15, 4)
#define CLK_DIV_SHFT 4
+/* GENI_IF_DISABLE_RO fields */
+#define FIFO_IF_DISABLE (BIT(0))
+
/* GENI_FW_REVISION_RO fields */
#define FW_REV_PROTOCOL_MSK GENMASK(15, 8)
#define FW_REV_PROTOCOL_SHFT 8