author    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>  2020-05-13 11:49:04 +0530
committer Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>  2020-05-13 13:45:28 +0530
commit    c2b94b7810a1f683dd9968582827bdcb3ff1fb7d (patch)
tree      217f1e46ac945f29e5cbc4906d5df8aa370af887
parent    3f5e09bda0bcd72c694a8180ef8b098ac7727a95 (diff)
[TEMP]: Add DMA support to GENI SPI driver (sdm845-spi-dma)
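
Route SPI transfers through the QUP's GPI DMA engine (GSI) when the
FIFO interface is disabled: request the "tx"/"rx" dmaengine channels,
map transfer buffers against the QUP wrapper device, and build a
config0/go/DMA TRE chain for each transfer. On the DT side, convert
the gpi_dma0 interrupts to GIC_SPI/IRQ_TYPE form, add iommus to both
QUP wrappers and dmas to spi0, and move GENI_IF_DISABLE_RO into the
shared header so the SPI driver can pick between FIFO and GPI DMA.
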
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 arch/arm64/boot/dts/qcom/sdm845.dtsi |  24 +-
 drivers/soc/qcom/qcom-geni-se.c      |   1 -
 drivers/spi/spi-geni-qcom.c          | 600 ++++++++++++++++++++++++++++++++--
 include/linux/qcom-geni-se.h         |  10 +
 4 files changed, 617 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 11e5ff0f47c7..58fbef65fe2b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -804,15 +804,24 @@
clock-names = "core";
};
- gpi_dma0: qcom,gpi-dma@0x800000 {
+ gpi_dma0: dma@800000 {
#dma-cells = <5>;
compatible = "qcom,gpi-dma";
reg = <0 0x00800000 0 0x60000>;
reg-names = "gpi-top";
- interrupts = <0 244 0>, <0 245 0>, <0 246 0>, <0 247 0>,
- <0 248 0>, <0 249 0>, <0 250 0>, <0 251 0>,
- <0 252 0>, <0 253 0>, <0 254 0>, <0 255 0>,
- <0 256 0>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
qcom,max-num-gpii = <13>;
qcom,gpii-mask = <0xfa>;
qcom,ev-factor = <2>;
@@ -827,6 +836,7 @@
clock-names = "m-ahb", "s-ahb";
clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ iommus = <&apps_smmu 0x003 0>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -855,6 +865,9 @@
interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&gpi_dma0 0 0 1 64 0>,
+ <&gpi_dma0 1 0 1 64 0>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -1135,6 +1148,7 @@
clock-names = "m-ahb", "s-ahb";
clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ iommus = <&apps_smmu 0x6c3 0x0>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 5e59268712be..79bcca1a5d1a 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -100,7 +100,6 @@ struct geni_wrapper {
#define GENI_OUTPUT_CTRL 0x24
#define GENI_CGC_CTRL 0x28
#define GENI_CLK_CTRL_RO 0x60
-#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_S_REVISION_RO 0x6c
#define SE_GENI_BYTE_GRAN 0x254
#define SE_GENI_TX_PACKING_CFG0 0x260
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index c3972424af71..698fb8e7895c 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -2,10 +2,13 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/msm_gpi.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
@@ -63,6 +66,36 @@
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
+#define GSI_LOOPBACK_EN (BIT(0))
+#define GSI_CS_TOGGLE (BIT(3))
+#define GSI_CPHA (BIT(4))
+#define GSI_CPOL (BIT(5))
+
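+/*
+ * A single GSI transfer needs at most three TREs on the TX channel:
+ * config0, go and one DMA TRE.
+ */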
+#define MAX_TX_SG (3)
+#define NUM_SPI_XFER (8)
+#define SPI_XFER_TIMEOUT_MS (250)
+
+struct gsi_desc_cb {
+ struct spi_master *spi;
+ struct spi_transfer *xfer;
+};
+
+struct spi_geni_gsi {
+ struct msm_gpi_tre config0_tre;
+ struct msm_gpi_tre go_tre;
+ struct msm_gpi_tre tx_dma_tre;
+ struct msm_gpi_tre rx_dma_tre;
+ struct scatterlist tx_sg[MAX_TX_SG];
+ struct scatterlist rx_sg;
+ dma_cookie_t tx_cookie;
+ dma_cookie_t rx_cookie;
+ struct msm_gpi_dma_async_tx_cb_param tx_cb_param;
+ struct msm_gpi_dma_async_tx_cb_param rx_cb_param;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct dma_async_tx_descriptor *rx_desc;
+ struct gsi_desc_cb desc_cb;
+};
+
enum spi_m_cmd_opcode {
CMD_NONE,
CMD_XFER,
@@ -77,7 +110,7 @@ struct spi_geni_master {
u32 fifo_width_bits;
u32 tx_wm;
unsigned long cur_speed_hz;
- unsigned int cur_bits_per_word;
+ int cur_bits_per_word;
unsigned int tx_rem_bytes;
unsigned int rx_rem_bytes;
const struct spi_transfer *cur_xfer;
@@ -86,6 +119,18 @@ struct spi_geni_master {
spinlock_t lock;
enum spi_m_cmd_opcode cur_mcmd;
int irq;
+ struct spi_geni_gsi *gsi;
+ struct dma_chan *tx;
+ struct dma_chan *rx;
+ struct msm_gpi_ctrl tx_event;
+ struct msm_gpi_ctrl rx_event;
+ struct completion tx_cb;
+ struct completion rx_cb;
+ bool qn_err;
+ int cur_xfer_mode;
+ int num_tx_eot;
+ int num_rx_eot;
+ int num_xfers;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -247,6 +292,383 @@ static int setup_fifo_params(struct spi_device *spi_slv,
return 0;
}
+static int select_xfer_mode(struct spi_master *spi,
+ struct spi_message *spi_msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ int mode = GENI_SE_FIFO;
+ int fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) &
+ FIFO_IF_DISABLE;
+ bool dma_chan_valid =
+ !(IS_ERR_OR_NULL(mas->tx) || IS_ERR_OR_NULL(mas->rx));
+
+ /*
+ * If the FIFO interface is disabled, GPI DMA is the only way to move
+ * data, so fail when no DMA channels are available; otherwise
+ * default to FIFO mode.
+ */
+ if (fifo_disable && !dma_chan_valid)
+ mode = -EINVAL;
+ else if (fifo_disable)
+ mode = GENI_GPI_DMA;
+ else
+ mode = GENI_SE_FIFO;
+ return mode;
+}
+
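+/*
+ * The config0 TRE programs the serial parameters: word length, TX/RX
+ * packing, loopback/CPOL/CPHA flags and the clock divider derived
+ * from the requested speed.
+ */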
+static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
+ struct spi_geni_master *mas, u16 mode)
+{
+ struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
+ u8 flags = 0;
+ u8 word_len = 0;
+ u8 pack = 0;
+ int div = 0;
+ int idx = 0;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(c0_tre))
+ return c0_tre;
+
+ if (mode & SPI_LOOP)
+ flags |= GSI_LOOPBACK_EN;
+
+ if (mode & SPI_CPOL)
+ flags |= GSI_CPOL;
+
+ if (mode & SPI_CPHA)
+ flags |= GSI_CPHA;
+
+ word_len = xfer->bits_per_word - MIN_WORD_LEN;
+ pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
+ ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
+ if (ret) {
+ dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret);
+ return ERR_PTR(ret);
+ }
+ c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
+ word_len);
+ c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, 0, 0);
+ c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
+ c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1);
+ dev_dbg(mas->dev, "%s: flags 0x%x word %d pack %d idx %d div %d\n",
+ __func__, flags, word_len, pack, idx, div);
+
+ return c0_tre;
+}
+
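+/*
+ * The go TRE issues the SPI command (TX/RX/full duplex) on a given
+ * chip select. RX-only commands carry the RX length and end the
+ * block (EOB); all others chain into the following DMA TRE.
+ */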
+static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags,
+ struct spi_geni_master *mas)
+{
+ struct msm_gpi_tre *go_tre = &mas->gsi[mas->num_xfers].go_tre;
+ int chain;
+ int eot;
+ int eob;
+ int link_rx = 0;
+
+ if (IS_ERR_OR_NULL(go_tre))
+ return go_tre;
+
+ go_tre->dword[0] = MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, cmd);
+ go_tre->dword[1] = MSM_GPI_SPI_GO_TRE_DWORD1;
+ go_tre->dword[2] = MSM_GPI_SPI_GO_TRE_DWORD2(rx_len);
+ if (cmd == SPI_RX_ONLY) {
+ eot = 0;
+ chain = 0;
+ eob = 1;
+ } else {
+ eot = 0;
+ chain = 1;
+ eob = 0;
+ }
+ go_tre->dword[3] = MSM_GPI_SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob,
+ chain);
+ dev_dbg(mas->dev,
+ "%s: rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
+ __func__, rx_len, flags, cs, cmd, eot, eob, chain);
+ return go_tre;
+}
+
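+/* A DMA TRE describes one mapped TX or RX buffer for the engine */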
+static struct msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre,
+ dma_addr_t buf, u32 len,
+ struct spi_geni_master *mas,
+ bool is_tx)
+{
+ if (IS_ERR_OR_NULL(tre))
+ return tre;
+
+ tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
+ tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
+ tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
+ tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0);
+ return tre;
+}
+
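+/* Channel-level event callback: any QUP/channel error unblocks both waiters */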
+static void spi_gsi_ch_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb,
+ void *ptr)
+{
+ struct spi_master *spi = ptr;
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ switch (cb->cb_event) {
+ case MSM_GPI_QUP_NOTIFY:
+ case MSM_GPI_QUP_MAX_EVENT:
+ dev_dbg(mas->dev,
+ "%s:cb_ev%d status%llu ts%llu count%llu\n",
+ __func__, cb->cb_event, cb->status,
+ cb->timestamp, cb->count);
+ break;
+ case MSM_GPI_QUP_ERROR:
+ case MSM_GPI_QUP_CH_ERROR:
+ case MSM_GPI_QUP_FW_ERROR:
+ case MSM_GPI_QUP_PENDING_EVENT:
+ case MSM_GPI_QUP_EOT_DESC_MISMATCH:
+ case MSM_GPI_QUP_SW_ERROR:
+ dev_err(mas->dev,
+ "%s: cb_ev %d status %llu ts %llu count %llu\n",
+ __func__, cb->cb_event, cb->status,
+ cb->timestamp, cb->count);
+ mas->qn_err = true;
+ complete_all(&mas->tx_cb);
+ complete_all(&mas->rx_cb);
+
+ break;
+ }
+}
+
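+/*
+ * Per-descriptor completion callbacks: check the reported length
+ * against the transfer before waking the waiting thread.
+ */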
+static void spi_gsi_rx_callback(void *cb)
+{
+ struct msm_gpi_dma_async_tx_cb_param *cb_param =
+ (struct msm_gpi_dma_async_tx_cb_param *)cb;
+ struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
+ struct spi_master *spi = desc_cb->spi;
+ struct spi_transfer *xfer = desc_cb->xfer;
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ if (xfer->rx_buf) {
+ if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
+ dev_err(mas->dev,
+ "%s: Unexpected GSI CB error\n", __func__);
+ return;
+ }
+ if (cb_param->length == xfer->len) {
+ dev_dbg(mas->dev,
+ "%s\n", __func__);
+ complete(&mas->rx_cb);
+ } else {
+ dev_err(mas->dev,
+ "%s: Length mismatch. Expected %d Callback %d\n",
+ __func__, xfer->len, cb_param->length);
+ }
+ }
+}
+
+static void spi_gsi_tx_callback(void *cb)
+{
+ struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
+ struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
+ struct spi_master *spi = desc_cb->spi;
+ struct spi_transfer *xfer = desc_cb->xfer;
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ if (xfer->tx_buf) {
+ if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
+ dev_err(mas->dev,
+ "%s: Unexpected GSI CB error\n", __func__);
+ return;
+ }
+ if (cb_param->length == xfer->len) {
+ dev_dbg(mas->dev,
+ "%s\n", __func__);
+ complete(&mas->tx_cb);
+ } else {
+ dev_err(mas->dev,
+ "%s: Length mismatch. Expected %d Callback %d\n",
+ __func__, xfer->len, cb_param->length);
+ }
+ }
+}
+
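+/*
+ * Build the TRE chain for one transfer: an optional config0 TRE (only
+ * when speed or word size changed), a go TRE and the DMA TRE(s), then
+ * submit it on the TX (and, for reads, RX) GPI channel.
+ */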
+static int setup_gsi_xfer(struct spi_transfer *xfer,
+ struct spi_geni_master *mas,
+ struct spi_device *spi_slv,
+ struct spi_master *spi)
+{
+ int ret = 0;
+ struct msm_gpi_tre *c0_tre = NULL;
+ struct msm_gpi_tre *go_tre = NULL;
+ struct msm_gpi_tre *tx_tre = NULL;
+ struct msm_gpi_tre *rx_tre = NULL;
+ struct scatterlist *xfer_tx_sg = mas->gsi[mas->num_xfers].tx_sg;
+ struct scatterlist *xfer_rx_sg = &mas->gsi[mas->num_xfers].rx_sg;
+ int rx_nent = 0;
+ int tx_nent = 0;
+ u8 cmd = 0;
+ u8 cs = 0;
+ u32 rx_len = 0;
+ int go_flags = 0;
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+ if ((xfer->bits_per_word != mas->cur_bits_per_word) ||
+ (xfer->speed_hz != mas->cur_speed_hz)) {
+ mas->cur_bits_per_word = xfer->bits_per_word;
+ mas->cur_speed_hz = xfer->speed_hz;
+ tx_nent++;
+ c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode);
+ if (IS_ERR_OR_NULL(c0_tre)) {
+ dev_err(mas->dev, "%s: Err setting config0 tre\n",
+ __func__);
+ return c0_tre ? PTR_ERR(c0_tre) : -ENOMEM;
+ }
+ }
+
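+ /* The engine counts the RX length in words, not bytes */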
+ if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
+ rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
+ } else {
+ int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;
+
+ rx_len = (xfer->len / bytes_per_word);
+ }
+
+ if (xfer->tx_buf && xfer->rx_buf) {
+ cmd = SPI_FULL_DUPLEX;
+ tx_nent += 2;
+ rx_nent++;
+ } else if (xfer->tx_buf) {
+ cmd = SPI_TX_ONLY;
+ tx_nent += 2;
+ rx_len = 0;
+ } else if (xfer->rx_buf) {
+ cmd = SPI_RX_ONLY;
+ tx_nent++;
+ rx_nent++;
+ }
+
+ cs |= spi_slv->chip_select;
+ if (!xfer->cs_change) {
+ if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
+ go_flags |= FRAGMENTATION;
+ }
+ go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas);
+
+ sg_init_table(xfer_tx_sg, tx_nent);
+ if (rx_nent)
+ sg_init_table(xfer_rx_sg, rx_nent);
+
+ if (c0_tre)
+ sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));
+
+ sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));
+ mas->gsi[mas->num_xfers].desc_cb.spi = spi;
+ mas->gsi[mas->num_xfers].desc_cb.xfer = xfer;
+ if (cmd & SPI_RX_ONLY) {
+ rx_tre = &mas->gsi[mas->num_xfers].rx_dma_tre;
+ rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->len, mas, 0);
+ if (IS_ERR_OR_NULL(rx_tre)) {
+ dev_err(mas->dev, "Err setting up rx tre\n");
+ return PTR_ERR(rx_tre);
+ }
+ sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
+ mas->gsi[mas->num_xfers].rx_desc =
+ dmaengine_prep_slave_sg(mas->rx,
+ &mas->gsi[mas->num_xfers].rx_sg, rx_nent,
+ DMA_DEV_TO_MEM, flags);
+ if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].rx_desc)) {
+ dev_err(mas->dev, "Err setting up rx desc\n");
+ return -EIO;
+ }
+ mas->gsi[mas->num_xfers].rx_desc->callback =
+ spi_gsi_rx_callback;
+ mas->gsi[mas->num_xfers].rx_desc->callback_param =
+ &mas->gsi[mas->num_xfers].rx_cb_param;
+ mas->gsi[mas->num_xfers].rx_cb_param.userdata =
+ &mas->gsi[mas->num_xfers].desc_cb;
+ mas->num_rx_eot++;
+ }
+
+ if (cmd & SPI_TX_ONLY) {
+ tx_tre = &mas->gsi[mas->num_xfers].tx_dma_tre;
+ tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->len, mas, 1);
+ if (IS_ERR_OR_NULL(tx_tre)) {
+ dev_err(mas->dev, "Err setting up tx tre\n");
+ return PTR_ERR(tx_tre);
+ }
+ sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
+ mas->num_tx_eot++;
+ }
+ mas->gsi[mas->num_xfers].tx_desc = dmaengine_prep_slave_sg(mas->tx,
+ mas->gsi[mas->num_xfers].tx_sg, tx_nent,
+ DMA_MEM_TO_DEV, flags);
+ if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].tx_desc)) {
+ dev_err(mas->dev, "Err setting up tx desc\n");
+ return -EIO;
+ }
+ mas->gsi[mas->num_xfers].tx_desc->callback = spi_gsi_tx_callback;
+ mas->gsi[mas->num_xfers].tx_desc->callback_param =
+ &mas->gsi[mas->num_xfers].tx_cb_param;
+ mas->gsi[mas->num_xfers].tx_cb_param.userdata =
+ &mas->gsi[mas->num_xfers].desc_cb;
+ mas->gsi[mas->num_xfers].tx_cookie =
+ dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
+ if (cmd & SPI_RX_ONLY)
+ mas->gsi[mas->num_xfers].rx_cookie =
+ dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
+ dma_async_issue_pending(mas->tx);
+ if (cmd & SPI_RX_ONLY)
+ dma_async_issue_pending(mas->rx);
+ mas->num_xfers++;
+ return ret;
+}
+
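+/*
+ * Map buffers against the QUP wrapper (the parent device), which is
+ * the node carrying the iommus property for the GPI DMA engine.
+ */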
+static int spi_geni_map_buf(struct spi_geni_master *mas,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *gsi_dev = mas->dev->parent;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(gsi_dev, xfer->rx_buf,
+ xfer->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(gsi_dev, xfer->rx_dma)) {
+ dev_err(mas->dev, "Err mapping buf\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (xfer->tx_buf) {
+ xfer->tx_dma = dma_map_single(gsi_dev,
+ (void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(gsi_dev, xfer->tx_dma)) {
+ dev_err(mas->dev, "Err mapping buf\n");
+ if (xfer->rx_buf)
+ dma_unmap_single(gsi_dev, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void spi_geni_unmap_buf(struct spi_geni_master *mas,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *gsi_dev = mas->dev->parent;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->rx_buf)
+ dma_unmap_single(gsi_dev, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+ if (xfer->tx_buf)
+ dma_unmap_single(gsi_dev, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+ }
+}
+
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
@@ -254,25 +676,59 @@ static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_geni_master *mas = spi_master_get_devdata(spi);
struct geni_se *se = &mas->se;
- geni_se_select_mode(se, GENI_SE_FIFO);
- ret = setup_fifo_params(spi_msg->spi, spi);
- if (ret)
- dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
+
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ reinit_completion(&mas->xfer_done);
+ ret = setup_fifo_params(spi_msg->spi, spi);
+ if (ret)
+ dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ } else if (mas->cur_xfer_mode == GENI_GPI_DMA) {
+ mas->num_tx_eot = 0;
+ mas->num_rx_eot = 0;
+ mas->num_xfers = 0;
+ reinit_completion(&mas->tx_cb);
+ reinit_completion(&mas->rx_cb);
+ memset(mas->gsi, 0,
+ (sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
+ geni_se_select_mode(se, GENI_GPI_DMA);
+ ret = spi_geni_map_buf(mas, spi_msg);
+ } else {
+ dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
+ mas->cur_xfer_mode);
+ ret = -EINVAL;
+ }
+
return ret;
}
+static int spi_geni_unprepare_message(struct spi_master *spi_mas,
+ struct spi_message *spi_msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
+
+ mas->cur_speed_hz = 0;
+ mas->cur_bits_per_word = 0;
+ if (mas->cur_xfer_mode == GENI_GPI_DMA)
+ spi_geni_unmap_buf(mas, spi_msg);
+ return 0;
+}
+
static int spi_geni_init(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
+ struct spi_master *spi = dev_get_drvdata(mas->dev);
unsigned int proto, major, minor, ver;
+ int ret = 0;
pm_runtime_get_sync(mas->dev);
proto = geni_se_read_proto(se);
if (proto != GENI_SE_SPI) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
- pm_runtime_put(mas->dev);
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_pm;
}
mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
@@ -295,8 +751,59 @@ static int spi_geni_init(struct spi_geni_master *mas)
else
mas->oversampling = 1;
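+ /* Grab the GPI DMA channels; without them only FIFO mode is possible */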
+ mas->tx = dma_request_slave_channel(mas->dev, "tx");
+ if (!mas->tx) {
+ dev_info(mas->dev, "Failed to get tx DMA ch\n");
+ goto out_pm;
+ } else {
+ mas->rx = dma_request_slave_channel(mas->dev, "rx");
+ if (!mas->rx) {
+ dev_info(mas->dev, "Failed to get rx DMA ch\n");
+ dma_release_channel(mas->tx);
+ mas->tx = NULL;
+ goto out_pm;
+ }
+ mas->gsi = devm_kcalloc(mas->dev, NUM_SPI_XFER,
+ sizeof(struct spi_geni_gsi), GFP_KERNEL);
+ if (!mas->gsi) {
+ dev_err(mas->dev, "Failed to get GSI mem\n");
+ dma_release_channel(mas->tx);
+ dma_release_channel(mas->rx);
+ mas->tx = NULL;
+ mas->rx = NULL;
+ goto out_pm;
+ }
+ mas->tx_event.init.callback = spi_gsi_ch_cb;
+ mas->tx_event.init.cb_param = spi;
+ mas->tx_event.cmd = MSM_GPI_INIT;
+ mas->tx->private = &mas->tx_event;
+ mas->rx_event.init.callback = spi_gsi_ch_cb;
+ mas->rx_event.init.cb_param = spi;
+ mas->rx_event.cmd = MSM_GPI_INIT;
+ mas->rx->private = &mas->rx_event;
+ if (dmaengine_slave_config(mas->tx, NULL)) {
+ dev_err(mas->dev, "Failed to Config Tx\n");
+ dma_release_channel(mas->tx);
+ dma_release_channel(mas->rx);
+ mas->tx = NULL;
+ mas->rx = NULL;
+ goto out_pm;
+ }
+ if (dmaengine_slave_config(mas->rx, NULL)) {
+ dev_err(mas->dev, "Failed to Config Rx\n");
+ dma_release_channel(mas->tx);
+ dma_release_channel(mas->rx);
+ mas->tx = NULL;
+ mas->rx = NULL;
+ }
+ }
+
+out_pm:
pm_runtime_put(mas->dev);
- return 0;
+ return ret;
}
static void setup_fifo_xfer(struct spi_transfer *xfer,
@@ -304,6 +811,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
u16 mode, struct spi_master *spi)
{
u32 m_cmd = 0;
+ u32 m_param = 0;
u32 spi_tx_cfg, len;
struct geni_se *se = &mas->se;
@@ -355,6 +863,12 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
len &= TRANS_LEN_MSK;
+ if (!xfer->cs_change) {
+ if (!list_is_last(&xfer->transfer_list,
+ &spi->cur_msg->transfers))
+ m_param |= FRAGMENTATION;
+ }
+
mas->cur_xfer = xfer;
if (m_cmd & SPI_TX_ONLY) {
mas->tx_rem_bytes = xfer->len;
@@ -367,7 +881,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
}
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
mas->cur_mcmd = CMD_XFER;
- geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
+ geni_se_setup_m_cmd(se, m_cmd, m_param);
/*
* TX_WATERMARK_REG should be set after SPI configuration and
@@ -383,13 +897,63 @@ static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ unsigned long timeout;
+ int ret = 0;
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
- return 0;
+ return ret;
- setup_fifo_xfer(xfer, mas, slv->mode, spi);
- return 1;
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ setup_fifo_xfer(xfer, mas, slv->mode, spi);
+ } else {
+ setup_gsi_xfer(xfer, mas, slv, spi);
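+ /*
+ * Transfers are only queued above; once the last transfer of
+ * the message (or the NUM_SPI_XFER-th) is queued, wait for
+ * every outstanding EOT before finalizing.
+ */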
+ if ((mas->num_xfers >= NUM_SPI_XFER) ||
+ (list_is_last(&xfer->transfer_list,
+ &spi->cur_msg->transfers))) {
+ int i;
+
+ for (i = 0 ; i < mas->num_tx_eot; i++) {
+ timeout =
+ wait_for_completion_timeout(
+ &mas->tx_cb,
+ msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+ if (!timeout) {
+ dev_err(mas->dev,
+ "Tx[%d] timed out\n", i);
+ ret = -ETIMEDOUT;
+ goto err_gsi_geni_transfer_one;
+ }
+ /* TODO: Move this to tx callback */
+ spi_finalize_current_transfer(spi);
+ }
+ for (i = 0 ; i < mas->num_rx_eot; i++) {
+ timeout =
+ wait_for_completion_timeout(
+ &mas->rx_cb,
+ msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+ if (!timeout) {
+ dev_err(mas->dev,
+ "Rx[%d] timed out\n", i);
+ ret = -ETIMEDOUT;
+ goto err_gsi_geni_transfer_one;
+ }
+ /* TODO: Move this to rx callback */
+ spi_finalize_current_transfer(spi);
+ }
+ if (mas->qn_err) {
+ ret = -EIO;
+ mas->qn_err = false;
+ goto err_gsi_geni_transfer_one;
+ }
+ }
+ }
+
+ return ret;
+
+err_gsi_geni_transfer_one:
+ dmaengine_terminate_all(mas->tx);
+ dmaengine_terminate_all(mas->rx);
+ return ret;
}
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
@@ -541,6 +1105,15 @@ static int spi_geni_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
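+ /* Prefer 64-bit DMA addressing; fall back to a 32-bit mask */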
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "could not set DMA mask\n");
+ return ret;
+ }
+ }
+
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -569,12 +1142,15 @@ static int spi_geni_probe(struct platform_device *pdev)
spi->num_chipselect = 4;
spi->max_speed_hz = 50000000;
spi->prepare_message = spi_geni_prepare_message;
+ spi->unprepare_message = spi_geni_unprepare_message;
spi->transfer_one = spi_geni_transfer_one;
spi->auto_runtime_pm = true;
spi->handle_err = handle_fifo_timeout;
spi->set_cs = spi_geni_set_cs;
init_completion(&mas->xfer_done);
+ init_completion(&mas->tx_cb);
+ init_completion(&mas->rx_cb);
spin_lock_init(&mas->lock);
pm_runtime_enable(dev);
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index bc936fefa109..7ee8ac2a1f0c 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -49,6 +49,7 @@ struct geni_se {
#define SE_GENI_STATUS 0x40
#define GENI_SER_M_CLK_CFG 0x48
#define GENI_SER_S_CLK_CFG 0x4c
+#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_REVISION_RO 0x68
#define SE_GENI_CLK_SEL 0x7c
#define SE_GENI_DMA_MODE_EN 0x258
@@ -91,6 +92,9 @@ struct geni_se {
#define CLK_DIV_MSK GENMASK(15, 4)
#define CLK_DIV_SHFT 4
+/* GENI_IF_DISABLE_RO fields */
+#define FIFO_IF_DISABLE (BIT(0))
+
/* GENI_FW_REVISION_RO fields */
#define FW_REV_PROTOCOL_MSK GENMASK(15, 8)
#define FW_REV_PROTOCOL_SHFT 8
@@ -230,6 +234,12 @@ struct geni_se {
#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
+/* GSI TRE fields */
+/* Packing fields */
+#define GSI_TX_PACK_EN (BIT(0))
+#define GSI_RX_PACK_EN (BIT(1))
+#define GSI_PRESERVE_PACK (BIT(2))
+
#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
u32 geni_se_get_qup_hw_version(struct geni_se *se);