From b714b84e2b74de68b12847bcaf2cf409a18fb741 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 25 Nov 2013 16:07:46 +0100 Subject: dma: pl330: Alloc dma_parms for the dma device In order to be able to set a maximum segment size for the device we need to allocate a dma_parameters struct for the device first. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cdf0483b8f2d..7adaf3abffba 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -578,6 +578,9 @@ struct dma_pl330_dmac { /* DMA-Engine Device */ struct dma_device ddma; + /* Holds info about sg limitations */ + struct device_dma_parameters dma_parms; + /* Pool of descriptors available for the DMAC's channels */ struct list_head desc_pool; /* To protect desc_pool manipulation */ @@ -3023,6 +3026,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) "unable to register DMA to the generic DT DMA helpers\n"); } } + + adev->dev.dma_parms = &pdmac->dma_parms; + /* * This is the limit for transfers with a buswidth of 1, larger * buswidths will have larger limits. -- cgit v1.2.3 From cd72b8462a2ebbf9524e726c65c2770f0bf70d22 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 13 Nov 2013 22:55:24 +0800 Subject: dma: imx-sdma: Add sdma firmware version 2 support On i.MX5/6 series, SDMA is using new version firmware to support SSI dual FIFO feature and HDMI Audio (i.MX6Q/DL only). Thus add it. Signed-off-by: Nicolin Chen Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 15 ++++++++++++++- include/linux/platform_data/dma-imx-sdma.h | 5 +++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c75679d42028..f769c7383536 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -323,6 +323,7 @@ struct sdma_engine { struct clk *clk_ipg; struct clk *clk_ahb; spinlock_t channel_0_lock; + u32 script_number; struct sdma_script_start_addrs *script_addrs; const struct sdma_driver_data *drvdata; }; @@ -1238,6 +1239,7 @@ static void sdma_issue_pending(struct dma_chan *chan) } #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 static void sdma_add_scripts(struct sdma_engine *sdma, const struct sdma_script_start_addrs *addr) @@ -1246,7 +1248,7 @@ static void sdma_add_scripts(struct sdma_engine *sdma, s32 *saddr_arr = (u32 *)sdma->script_addrs; int i; - for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) + for (i = 0; i < sdma->script_number; i++) if (addr_arr[i] > 0) saddr_arr[i] = addr_arr[i]; } @@ -1272,6 +1274,17 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) goto err_firmware; if (header->ram_code_start + header->ram_code_size > fw->size) goto err_firmware; + switch (header->version_major) { + case 1: + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; + break; + case 2: + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; + break; + default: + dev_err(sdma->dev, "unknown firmware version\n"); + goto err_firmware; + } addr = (void *)header + header->script_addrs_start; ram_code = (void *)header + header->ram_code_start; diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h index 3a3942823c20..eabac4e2fc99 100644 --- a/include/linux/platform_data/dma-imx-sdma.h +++ b/include/linux/platform_data/dma-imx-sdma.h @@ -43,6 +43,11 @@ struct sdma_script_start_addrs { s32 
dptc_dvfs_addr; s32 utra_addr; s32 ram_code_start_addr; + /* End of v1 array */ + s32 mcu_2_ssish_addr; + s32 ssish_2_mcu_addr; + s32 hdmi_dma_addr; + /* End of v2 array */ }; /** -- cgit v1.2.3 From 1a895578d4e50577bb6aa79bd194b502f09dd768 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 13 Nov 2013 22:55:25 +0800 Subject: dma: imx-sdma: Add new dma type for ssi dual fifo script This patch adds a new DMA_TYPE for the SSI dual FIFO script, included in SDMA firmware version 2. This script allows SSI to use dual FIFO mode to transmit/receive data without occasional hardware underruns/overruns. Signed-off-by: Nicolin Chen Acked-by: Kumar Gala Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt | 1 + drivers/dma/imx-sdma.c | 4 ++++ include/linux/platform_data/dma-imx.h | 1 + 3 files changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt index 4fa814d38321..68b83ecc3850 100644 --- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt @@ -42,6 +42,7 @@ The full ID of peripheral types can be found below. 19 IPU Memory 20 ASRC 21 ESAI + 22 SSI Dual FIFO (needs firmware ver >= 2) The third cell specifies the transfer priority as below. diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f769c7383536..152247675feb 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -725,6 +725,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac, per_2_emi = sdma->script_addrs->app_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_app_addr; break; + case IMX_DMATYPE_SSI_DUAL: + per_2_emi = sdma->script_addrs->ssish_2_mcu_addr; + emi_2_per = sdma->script_addrs->mcu_2_ssish_addr; + break; case IMX_DMATYPE_SSI_SP: case IMX_DMATYPE_MMC: case IMX_DMATYPE_SDHC: diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index beac6b8b6a7b..bcbc6c3c14c0 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -39,6 +39,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */ IMX_DMATYPE_ASRC, /* ASRC */ IMX_DMATYPE_ESAI, /* ESAI */ + IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ }; enum imx_dma_prio { -- cgit v1.2.3 From b1d27c79c8377df1880447375deffa3bb82c7bd3 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 13 Nov 2013 22:55:27 +0800 Subject: ARM: dts: imx: use dual-fifo sdma script for ssi Use dual-fifo sdma scripts instead of shared scripts for ssi on i.MX series.
Signed-off-by: Nicolin Chen Acked-by: Shawn Guo Signed-off-by: Vinod Koul --- arch/arm/boot/dts/imx51.dtsi | 4 ++-- arch/arm/boot/dts/imx53.dtsi | 4 ++-- arch/arm/boot/dts/imx6qdl.dtsi | 12 ++++++------ arch/arm/boot/dts/imx6sl.dtsi | 12 ++++++------ 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 4bcdd3ad15e5..5be28377d41f 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -159,8 +159,8 @@ reg = <0x70014000 0x4000>; interrupts = <30>; clocks = <&clks 49>; - dmas = <&sdma 24 1 0>, - <&sdma 25 1 0>; + dmas = <&sdma 24 22 0>, + <&sdma 25 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */ diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 4307e80b2d2e..7208fde9bc16 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -153,8 +153,8 @@ reg = <0x50014000 0x4000>; interrupts = <30>; clocks = <&clks 49>; - dmas = <&sdma 24 1 0>, - <&sdma 25 1 0>; + dmas = <&sdma 24 22 0>, + <&sdma 25 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */ diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index fb28b2ecb1db..e9534f2f53e7 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -236,8 +236,8 @@ reg = <0x02028000 0x4000>; interrupts = <0 46 0x04>; clocks = <&clks 178>; - dmas = <&sdma 37 1 0>, - <&sdma 38 1 0>; + dmas = <&sdma 37 22 0>, + <&sdma 38 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <38 37>; @@ -249,8 +249,8 @@ reg = <0x0202c000 0x4000>; interrupts = <0 47 0x04>; clocks = <&clks 179>; - dmas = <&sdma 41 1 0>, - <&sdma 42 1 0>; + dmas = <&sdma 41 22 0>, + <&sdma 42 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <42 41>; @@ -262,8 +262,8 @@ reg = <0x02030000 0x4000>; interrupts = <0 48 0x04>; clocks = <&clks 180>; - dmas = <&sdma 45 1 0>, - <&sdma 46 1 0>; + dmas = <&sdma 45 22 0>, + <&sdma 46 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <46 45>; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 28558f1aaf2d..7b57fecd0bd5 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -199,8 +199,8 @@ reg = <0x02028000 0x4000>; interrupts = <0 46 0x04>; clocks = <&clks IMX6SL_CLK_SSI1>; - dmas = <&sdma 37 1 0>, - <&sdma 38 1 0>; + dmas = <&sdma 37 22 0>, + <&sdma 38 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; status = "disabled"; @@ -211,8 +211,8 @@ reg = <0x0202c000 0x4000>; interrupts = <0 47 0x04>; clocks = <&clks IMX6SL_CLK_SSI2>; - dmas = <&sdma 41 1 0>, - <&sdma 42 1 0>; + dmas = <&sdma 41 22 0>, + <&sdma 42 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; status = "disabled"; @@ -223,8 +223,8 @@ reg = <0x02030000 0x4000>; interrupts = <0 48 0x04>; clocks = <&clks IMX6SL_CLK_SSI3>; - dmas = <&sdma 45 1 0>, - <&sdma 46 1 0>; + dmas = <&sdma 45 22 0>, + <&sdma 46 22 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; status = "disabled"; -- cgit v1.2.3 From 0da9e55e71bc239102d47ac422162c9915c99074 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 13 Nov 2013 22:55:26 +0800 Subject: ASoC: fsl_ssi: Add dual fifo mode support By enabling dual fifo mode, it would allow SSI enter a better performance to transimit/receive data without occasional hardware underrun/overrun. 
Signed-off-by: Nicolin Chen Acked-by: Timur Tabi Acked-by: Mark Brown Signed-off-by: Vinod Koul --- sound/soc/fsl/fsl_ssi.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 35e277379b86..f43be6d4c549 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -143,6 +143,7 @@ struct fsl_ssi_private { bool ssi_on_imx; bool imx_ac97; bool use_dma; + bool use_dual_fifo; struct clk *clk; struct snd_dmaengine_dai_dma_data dma_params_tx; struct snd_dmaengine_dai_dma_data dma_params_rx; @@ -413,6 +414,12 @@ static int fsl_ssi_setup(struct fsl_ssi_private *ssi_private) write_ssi(CCSR_SSI_SOR_WAIT(3), &ssi->sor); } + if (ssi_private->use_dual_fifo) { + write_ssi_mask(&ssi->srcr, 0, CCSR_SSI_SRCR_RFEN1); + write_ssi_mask(&ssi->stcr, 0, CCSR_SSI_STCR_TFEN1); + write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_TCH_EN); + } + return 0; } @@ -480,6 +487,15 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream, ssi_private->second_stream = substream; } + /* When using dual fifo mode, it is safer to ensure an even period + * size. If appearing to an odd number while DMA always starts its + * task from fifo0, fifo1 would be neglected at the end of each + * period. But SSI would still access fifo1 with an invalid data. + */ + if (ssi_private->use_dual_fifo) + snd_pcm_hw_constraint_step(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2); + return 0; } @@ -947,7 +963,7 @@ static int fsl_ssi_probe(struct platform_device *pdev) ssi_private->fifo_depth = 8; if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx21-ssi")) { - u32 dma_events[2]; + u32 dma_events[2], dmas[4]; ssi_private->ssi_on_imx = true; ssi_private->clk = devm_clk_get(&pdev->dev, NULL); @@ -1001,6 +1017,15 @@ static int fsl_ssi_probe(struct platform_device *pdev) dma_events[0], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI); imx_pcm_dma_params_init_data(&ssi_private->filter_data_rx, dma_events[1], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI); + if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4) + && dmas[2] == IMX_DMATYPE_SSI_DUAL) { + ssi_private->use_dual_fifo = true; + /* When using dual fifo mode, we need to keep watermark + * as even numbers due to dma script limitation. + */ + ssi_private->dma_params_tx.maxburst &= ~0x1; + ssi_private->dma_params_rx.maxburst &= ~0x1; + } } else if (ssi_private->use_dma) { /* The 'name' should not have any slashes in it. */ ret = devm_request_irq(&pdev->dev, ssi_private->irq, -- cgit v1.2.3 From 2b7f65b11d87f9f3925dee5df020303b362c98ee Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 17 Nov 2013 12:12:56 -0800 Subject: mmp_pdma: Style neatening Neaten code used as a template for other drivers. Make the code more consistent with kernel styles. o Convert #defines with (1 << bit) to BIT(bit) Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 204 +++++++++++++++++++++++++------------------------ 1 file changed, 105 insertions(+), 99 deletions(-) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 8869500ab92b..3f7712c4d3fa 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -5,6 +5,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ + #include #include #include @@ -32,38 +33,37 @@ #define DTADR 0x0208 #define DCMD 0x020c -#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ -#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ -#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ -#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ -#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ -#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ -#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ -#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ - -#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ -#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ -#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ -#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ -#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ -#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ -#define DCSR_EORINTR (1 << 9) /* The end of Receive */ - -#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \ - (((n) & 0x3f) << 2)) -#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ -#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ +#define DCSR_RUN BIT(31) /* Run Bit (read / write) */ +#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ +#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */ +#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ +#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ +#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ +#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ +#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ + +#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */ +#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ +#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ +#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ +#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ +#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ +#define DCSR_EORINTR BIT(9) /* The end of Receive */ + +#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) +#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ +#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ -#define DDADR_STOP (1 << 0) /* Stop (read / write) */ - -#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ -#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ -#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ -#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ -#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ -#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ -#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ +#define DDADR_STOP BIT(0) /* Stop (read / write) */ + +#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ +#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */ +#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ +#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. 
*/ +#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ +#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ +#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */ #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ @@ -132,10 +132,14 @@ struct mmp_pdma_device { spinlock_t phy_lock; /* protect alloc/free phy channels */ }; -#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) -#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) -#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) -#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) +#define tx_to_mmp_pdma_desc(tx) \ + container_of(tx, struct mmp_pdma_desc_sw, async_tx) +#define to_mmp_pdma_desc(lh) \ + container_of(lh, struct mmp_pdma_desc_sw, node) +#define to_mmp_pdma_chan(dchan) \ + container_of(dchan, struct mmp_pdma_chan, chan) +#define to_mmp_pdma_dev(dmadev) \ + container_of(dmadev, struct mmp_pdma_device, device) static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) { @@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy) writel(dalgn, phy->base + DALGN); reg = (phy->idx << 2) + DCSR; - writel(readl(phy->base + reg) | DCSR_RUN, - phy->base + reg); + writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg); } static void disable_chan(struct mmp_pdma_phy *phy) { u32 reg; - if (phy) { - reg = (phy->idx << 2) + DCSR; - writel(readl(phy->base + reg) & ~DCSR_RUN, - phy->base + reg); - } + if (!phy) + return; + + reg = (phy->idx << 2) + DCSR; + writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg); } static int clear_chan_irq(struct mmp_pdma_phy *phy) @@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy) u32 dint = readl(phy->base + DINT); u32 reg = (phy->idx << 2) + DCSR; - if (dint & BIT(phy->idx)) { - /* clear irq */ - dcsr = readl(phy->base + reg); - writel(dcsr, phy->base + reg); - if ((dcsr & DCSR_BUSERR) && (phy->vchan)) - dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); - return 0; - } - return -EAGAIN; + if (!(dint & BIT(phy->idx))) + return -EAGAIN; + + /* clear irq */ + dcsr = readl(phy->base + reg); + writel(dcsr, phy->base + reg); + if ((dcsr & DCSR_BUSERR) && (phy->vchan)) + dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); + + return 0; } static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) { struct mmp_pdma_phy *phy = dev_id; - if (clear_chan_irq(phy) == 0) { - tasklet_schedule(&phy->vchan->tasklet); - return IRQ_HANDLED; - } else + if (clear_chan_irq(phy) != 0) return IRQ_NONE; + + tasklet_schedule(&phy->vchan->tasklet); + return IRQ_HANDLED; } static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) @@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) if (irq_num) return IRQ_HANDLED; - else - return IRQ_NONE; + + return IRQ_NONE; } /* lookup free phy channel as descending priority */ @@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) */ spin_lock_irqsave(&pdev->phy_lock, flags); - for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { + for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { for (i = 0; i < pdev->dma_channels; i++) { - if (prio != ((i & 0xf) >> 2)) + if (prio != (i & 0xf) >> 2) continue; phy = &pdev->phy[i]; if (!phy->vchan) { @@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) if 
(chan->desc_pool) return 1; - chan->desc_pool = - dma_pool_create(dev_name(&dchan->dev->device), chan->dev, - sizeof(struct mmp_pdma_desc_sw), - __alignof__(struct mmp_pdma_desc_sw), 0); + chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), + chan->dev, + sizeof(struct mmp_pdma_desc_sw), + __alignof__(struct mmp_pdma_desc_sw), + 0); if (!chan->desc_pool) { dev_err(chan->dev, "unable to allocate descriptor pool\n"); return -ENOMEM; } + mmp_pdma_free_phy(chan); chan->idle = true; chan->dev_addr = 0; @@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) } static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, - struct list_head *list) + struct list_head *list) { struct mmp_pdma_desc_sw *desc, *_desc; @@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) static struct dma_async_tx_descriptor * mmp_pdma_prep_memcpy(struct dma_chan *dchan, - dma_addr_t dma_dst, dma_addr_t dma_src, - size_t len, unsigned long flags) + dma_addr_t dma_dst, dma_addr_t dma_src, + size_t len, unsigned long flags) { struct mmp_pdma_chan *chan; struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; @@ -515,8 +521,8 @@ fail: static struct dma_async_tx_descriptor * mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_transfer_direction dir, - unsigned long flags, void *context) + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; @@ -591,10 +597,11 @@ fail: return NULL; } -static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( - struct dma_chan *dchan, dma_addr_t buf_addr, size_t len, - size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) +static struct dma_async_tx_descriptor * +mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, + dma_addr_t buf_addr, size_t len, size_t period_len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) { struct mmp_pdma_chan *chan; struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; @@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( goto fail; } - new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN | - (DCMD_LENGTH & period_len); + new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | + (DCMD_LENGTH & period_len)); new->desc.dsadr = dma_src; new->desc.dtadr = dma_dst; @@ -677,12 +684,11 @@ fail: } static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, - unsigned long arg) + unsigned long arg) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); struct dma_slave_config *cfg = (void *)arg; unsigned long flags; - int ret = 0; u32 maxburst = 0, addr = 0; enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; @@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, return -ENOSYS; } - return ret; + return 0; } static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, - dma_cookie_t cookie, struct dma_tx_state *txstate) + dma_cookie_t cookie, + struct dma_tx_state *txstate) { return dma_cookie_status(dchan, cookie, txstate); } @@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op) return 0; } -static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, - int idx, int irq) +static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) { struct mmp_pdma_phy *phy = &pdev->phy[idx]; 
struct mmp_pdma_chan *chan; int ret; - chan = devm_kzalloc(pdev->dev, - sizeof(struct mmp_pdma_chan), GFP_KERNEL); + chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan), + GFP_KERNEL); if (chan == NULL) return -ENOMEM; @@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, phy->base = pdev->base; if (irq) { - ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_chan_handler, 0, "pdma", phy); + ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0, + "pdma", phy); if (ret) { dev_err(pdev->dev, "channel request irq fail!\n"); return ret; @@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, INIT_LIST_HEAD(&chan->chain_running); /* register virt channel to dma engine */ - list_add_tail(&chan->chan.device_node, - &pdev->device.channels); + list_add_tail(&chan->chan.device_node, &pdev->device.channels); return 0; } @@ -913,13 +918,12 @@ retry: * the lookup and the reservation */ chan = dma_get_slave_channel(candidate); - if (chan) { - struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan); - c->drcmr = dma_spec->args[0]; - return chan; - } + if (!chan) + goto retry; - goto retry; + to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; + + return chan; } static int mmp_pdma_probe(struct platform_device *op) @@ -934,6 +938,7 @@ static int mmp_pdma_probe(struct platform_device *op) pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); if (!pdev) return -ENOMEM; + pdev->dev = &op->dev; spin_lock_init(&pdev->phy_lock); @@ -945,8 +950,8 @@ static int mmp_pdma_probe(struct platform_device *op) of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); if (of_id) - of_property_read_u32(pdev->dev->of_node, - "#dma-channels", &dma_channels); + of_property_read_u32(pdev->dev->of_node, "#dma-channels", + &dma_channels); else if (pdata && pdata->dma_channels) dma_channels = pdata->dma_channels; else @@ -958,8 +963,9 @@ static int mmp_pdma_probe(struct platform_device *op) irq_num++; } - pdev->phy = devm_kzalloc(pdev->dev, - dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); + pdev->phy = devm_kcalloc(pdev->dev, + dma_channels, sizeof(struct mmp_pdma_chan), + GFP_KERNEL); if (pdev->phy == NULL) return -ENOMEM; @@ -968,8 +974,8 @@ static int mmp_pdma_probe(struct platform_device *op) if (irq_num != dma_channels) { /* all chan share one irq, demux inside */ irq = platform_get_irq(op, 0); - ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_int_handler, 0, "pdma", pdev); + ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0, + "pdma", pdev); if (ret) return ret; } @@ -1045,7 +1051,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param) if (chan->device->dev->driver != &mmp_pdma_driver.driver) return false; - c->drcmr = *(unsigned int *) param; + c->drcmr = *(unsigned int *)param; return true; } @@ -1053,6 +1059,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn); module_platform_driver(mmp_pdma_driver); -MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); +MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); MODULE_AUTHOR("Marvell International Ltd."); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 9d0f1fa6e104ad80c21a1051bb875b4c33d437e0 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 28 Nov 2013 14:59:39 +0530 Subject: dmaengine: mmp_tdma: fix the 'pointer from integer' warnings the driver is using unsigned long type for storing the channel register base "reg_base", this leads to bunch of warns when we try to use this as pointer. 
So better use an iomem pointer type for this variable drivers/dma/mmp_tdma.c: In function 'mmp_tdma_chan_set_desc': drivers/dma/mmp_tdma.c:143: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:144: warning: passing argument 1 of '__raw_readl' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:144: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c: In function 'mmp_tdma_enable_chan': drivers/dma/mmp_tdma.c:151: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:153: warning: passing argument 1 of '__raw_readl' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:153: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c: In function 'mmp_tdma_disable_chan': drivers/dma/mmp_tdma.c:160: warning: passing argument 1 of '__raw_readl' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:160: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:164: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c: In function 'mmp_tdma_resume_chan': drivers/dma/mmp_tdma.c:171: warning: passing argument 1 of '__raw_readl' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:171: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c: In function 'mmp_tdma_pause_chan': drivers/dma/mmp_tdma.c:178: warning: passing argument 1 of '__raw_readl' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:178: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c: In function 'mmp_tdma_config_chan': drivers/dma/mmp_tdma.c:263: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast drivers/dma/mmp_tdma.c: In function 'mmp_tdma_clear_chan_irq': drivers/dma/mmp_tdma.c:269: warning: passing argument 1 of '__raw_readl' makes pointer from integer without a cast drivers/dma/mmp_tdma.c:274: warning: passing argument 2 of '__raw_writel' makes pointer from integer without a cast Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 3ddacc14a736..61b562b2602d 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -121,7 +121,7 @@ struct mmp_tdma_chan { int idx; enum mmp_tdma_type type; int irq; - unsigned long reg_base; + void __iomem *reg_base; size_t buf_len; size_t period_len; @@ -526,7 +526,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, tdmac->chan.device = &tdev->device; tdmac->idx = idx; tdmac->type = type; - tdmac->reg_base = (unsigned long)tdev->base + idx * 4; + tdmac->reg_base = tdev->base + idx * 4; tdmac->status = DMA_COMPLETE; tdev->tdmac[tdmac->idx] = tdmac; tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); -- cgit v1.2.3 From a9ebbcd986de217cf650cb9f850a2fe3f72097f1 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 29 Nov 2013 10:52:52 +0530 Subject: dmaengine: mmp: fix uninitialized variable drivers/dma/mmp_tdma.c:236:8: warning: 'tdcr' may be used uninitialized in this function [-Wuninitialized] Reported-by: Dan Williams Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 61b562b2602d..d4b730ce0369 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -182,7 +182,7 @@ static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) { - unsigned int tdcr; + unsigned int tdcr = 0; mmp_tdma_disable_chan(tdmac); -- cgit v1.2.3 From 397321f45d31159c84982978106f3165be44bc2b Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Mon, 16 Dec 2013 12:12:17 +0400 Subject: dmaengine: fix incorrect kernel-doc comment for struct dma_slave_config The 'direction' member of 'struct dma_slave_config' is of data type 'enum dma_transfer_direction', so update the kernel-doc comment for 'struct dma_slave_config' to refer to appropriate values. Signed-off-by: Alexander Popov Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 41cf0c399288..bd6b88222ced 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -305,9 +305,8 @@ enum dma_slave_buswidth { /** * struct dma_slave_config - dma slave channel runtime config * @direction: whether the data shall go in or out on this slave - * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are - * legal values, DMA_BIDIRECTIONAL is not acceptable since we - * need to differentiate source and target addresses. + * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are + * legal values. * @src_addr: this is the physical address where DMA slave data * should be read (RX), if the source is memory this argument is * ignored. -- cgit v1.2.3 From 0e772c672686460c5341fa2914c02c45c2b6a3f3 Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Fri, 13 Dec 2013 11:06:18 -0300 Subject: dma: edma: Set debug level to debugging messages The channel allocated/released messages are very spammy and not really interesting to users. Change them to "debug" level. Signed-off-by: Ezequiel Garcia Acked-by: Matt Porter Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 2539ea0cbc63..cd8da451d199 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -699,8 +699,8 @@ static int edma_alloc_chan_resources(struct dma_chan *chan) echan->alloced = true; echan->slot[0] = echan->ch_num; - dev_info(dev, "allocated channel for %u:%u\n", - EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); + dev_dbg(dev, "allocated channel for %u:%u\n", + EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); return 0; @@ -736,7 +736,7 @@ static void edma_free_chan_resources(struct dma_chan *chan) echan->alloced = false; } - dev_info(dev, "freeing channel for %u\n", echan->ch_num); + dev_dbg(dev, "freeing channel for %u\n", echan->ch_num); } /* Send pending descriptor to hardware */ -- cgit v1.2.3 From 1080411c6bf9da8e815292ccdb22f64662a38bb2 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 14 Dec 2013 00:16:23 +0100 Subject: dma: pl08x: allow zero slave channels It might happen that a platform wants to use its DMA engine for memcpy only, and then we have zero slave channels to initialize, so allow the slave initialization to return zero. 
Signed-off-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index ec4ee5c1fe9d..f68f1c1d560b 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2167,7 +2167,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) /* Register slave channels */ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, pl08x->pd->num_slave_channels, true); - if (ret <= 0) { + if (ret < 0) { dev_warn(&pl08x->adev->dev, "%s failed to enumerate slave channels - %d\n", __func__, ret); -- cgit v1.2.3 From 96286b57669073e81870e33a3e5ce476433d115f Mon Sep 17 00:00:00 2001 From: Florian Meier Date: Mon, 6 Jan 2014 20:18:24 +0100 Subject: dmaengine: Add support for BCM2835 Add support for DMA controller of BCM2835 as used in the Raspberry Pi. Currently it only supports cyclic DMA. Signed-off-by: Florian Meier Reviewed-by: Andy Shevchenko Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/bcm2835-dma.txt | 57 ++ drivers/dma/Kconfig | 6 + drivers/dma/Makefile | 1 + drivers/dma/bcm2835-dma.c | 706 +++++++++++++++++++++ 4 files changed, 770 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/bcm2835-dma.txt create mode 100644 drivers/dma/bcm2835-dma.c diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt new file mode 100644 index 000000000000..1396078d15ac --- /dev/null +++ b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt @@ -0,0 +1,57 @@ +* BCM2835 DMA controller + +The BCM2835 DMA controller has 16 channels in total. +Only the lower 13 channels have an associated IRQ. +Some arbitrary channels are used by the firmware +(1,3,6,7 in the current firmware version). +The channels 0,2 and 3 have special functionality +and should not be used by the driver. + +Required properties: +- compatible: Should be "brcm,bcm2835-dma". +- reg: Should contain DMA registers location and length. +- interrupts: Should contain the DMA interrupts associated + to the DMA channels in ascending order. +- #dma-cells: Must be <1>, the cell in the dmas property of the + client device represents the DREQ number. +- brcm,dma-channel-mask: Bit mask representing the channels + not used by the firmware in ascending order, + i.e. first channel corresponds to LSB. + +Example: + +dma: dma@7e007000 { + compatible = "brcm,bcm2835-dma"; + reg = <0x7e007000 0xf00>; + interrupts = <1 16>, + <1 17>, + <1 18>, + <1 19>, + <1 20>, + <1 21>, + <1 22>, + <1 23>, + <1 24>, + <1 25>, + <1 26>, + <1 27>, + <1 28>; + + #dma-cells = <1>; + brcm,dma-channel-mask = <0x7f35>; +}; + +DMA clients connected to the BCM2835 DMA controller must use the format +described in the dma.txt file, using a two-cell specifier for each channel. 
+ +Example: + +bcm2835_i2s: i2s@7e203000 { + compatible = "brcm,bcm2835-i2s"; + reg = < 0x7e203000 0x20>, + < 0x7e101098 0x02>; + + dmas = <&dma 2>, + <&dma 3>; + dma-names = "tx", "rx"; +}; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 446687cc2334..cff5f1e39cf7 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -301,6 +301,12 @@ config DMA_OMAP select DMA_ENGINE select DMA_VIRTUAL_CHANNELS +config DMA_BCM2835 + tristate "BCM2835 DMA engine support" + depends on (ARCH_BCM2835 || MACH_BCM2708) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + config TI_CPPI41 tristate "AM33xx CPPI41 DMA support" depends on ARCH_OMAP diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 0ce2da97e429..0a6f08edf2c8 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o obj-$(CONFIG_TI_CPPI41) += cppi41.o diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c new file mode 100644 index 000000000000..6ae070825f3f --- /dev/null +++ b/drivers/dma/bcm2835-dma.c @@ -0,0 +1,706 @@ +/* + * BCM2835 DMA engine support + * + * This driver only supports cyclic DMA transfers + * as needed for the I2S module. + * + * Author: Florian Meier + * Copyright 2013 + * + * Based on + * OMAP DMAengine support by Russell King + * + * BCM2708 DMA Driver + * Copyright (C) 2010 Broadcom + * + * Raspberry Pi PCM I2S ALSA Driver + * Copyright (c) by Phil Poole 2013 + * + * MARVELL MMP Peripheral DMA Driver + * Copyright 2012 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +struct bcm2835_dmadev { + struct dma_device ddev; + spinlock_t lock; + void __iomem *base; + struct device_dma_parameters dma_parms; +}; + +struct bcm2835_dma_cb { + uint32_t info; + uint32_t src; + uint32_t dst; + uint32_t length; + uint32_t stride; + uint32_t next; + uint32_t pad[2]; +}; + +struct bcm2835_chan { + struct virt_dma_chan vc; + struct list_head node; + + struct dma_slave_config cfg; + bool cyclic; + unsigned int dreq; + + int ch; + struct bcm2835_desc *desc; + + void __iomem *chan_base; + int irq_number; +}; + +struct bcm2835_desc { + struct virt_dma_desc vd; + enum dma_transfer_direction dir; + + unsigned int control_block_size; + struct bcm2835_dma_cb *control_block_base; + dma_addr_t control_block_base_phys; + + unsigned int frames; + size_t size; +}; + +#define BCM2835_DMA_CS 0x00 +#define BCM2835_DMA_ADDR 0x04 +#define BCM2835_DMA_SOURCE_AD 0x0c +#define BCM2835_DMA_DEST_AD 0x10 +#define BCM2835_DMA_NEXTCB 0x1C + +/* DMA CS Control and Status bits */ +#define BCM2835_DMA_ACTIVE BIT(0) +#define BCM2835_DMA_INT BIT(2) +#define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */ +#define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */ +#define BCM2835_DMA_ERR BIT(8) +#define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */ +#define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */ + +#define BCM2835_DMA_INT_EN BIT(0) +#define BCM2835_DMA_D_INC BIT(4) +#define BCM2835_DMA_D_DREQ BIT(6) +#define BCM2835_DMA_S_INC BIT(8) +#define BCM2835_DMA_S_DREQ BIT(10) + +#define BCM2835_DMA_PER_MAP(x) ((x) << 16) + +#define BCM2835_DMA_DATA_TYPE_S8 1 +#define BCM2835_DMA_DATA_TYPE_S16 2 +#define BCM2835_DMA_DATA_TYPE_S32 4 +#define BCM2835_DMA_DATA_TYPE_S128 16 + +#define BCM2835_DMA_BULK_MASK BIT(0) +#define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3)) + +/* Valid only for channels 0 - 14, 15 has its own base address */ +#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ +#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) + +static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) +{ + return container_of(d, struct bcm2835_dmadev, ddev); +} + +static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct bcm2835_chan, vc.chan); +} + +static inline struct bcm2835_desc *to_bcm2835_dma_desc( + struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct bcm2835_desc, vd.tx); +} + +static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) +{ + struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); + dma_free_coherent(desc->vd.tx.chan->device->dev, + desc->control_block_size, + desc->control_block_base, + desc->control_block_base_phys); + kfree(desc); +} + +static int bcm2835_dma_abort(void __iomem *chan_base) +{ + unsigned long cs; + long int timeout = 10000; + + cs = readl(chan_base + BCM2835_DMA_CS); + if (!(cs & BCM2835_DMA_ACTIVE)) + return 0; + + /* Write 0 to the active bit - Pause the DMA */ + writel(0, chan_base + BCM2835_DMA_CS); + + /* Wait for any current AXI transfer to complete */ + while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { + cpu_relax(); + cs = readl(chan_base + BCM2835_DMA_CS); + } + + /* We'll un-pause when we set of our next DMA */ + if (!timeout) + return -ETIMEDOUT; + + if (!(cs & BCM2835_DMA_ACTIVE)) + return 0; + + /* Terminate the control block chain 
*/ + writel(0, chan_base + BCM2835_DMA_NEXTCB); + + /* Abort the whole DMA */ + writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, + chan_base + BCM2835_DMA_CS); + + return 0; +} + +static void bcm2835_dma_start_desc(struct bcm2835_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct bcm2835_desc *d; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_bcm2835_dma_desc(&vd->tx); + + writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); +} + +static irqreturn_t bcm2835_dma_callback(int irq, void *data) +{ + struct bcm2835_chan *c = data; + struct bcm2835_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Acknowledge interrupt */ + writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); + + d = c->desc; + + if (d) { + /* TODO Only works for cyclic DMA */ + vchan_cyclic_callback(&d->vd); + } + + /* Keep the DMA engine running */ + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); + + spin_unlock_irqrestore(&c->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + + dev_dbg(c->vc.chan.device->dev, + "Allocating DMA channel %d\n", c->ch); + + return request_irq(c->irq_number, + bcm2835_dma_callback, 0, "DMA IRQ", c); +} + +static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + + vchan_free_chan_resources(&c->vc); + free_irq(c->irq_number, c); + + dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); +} + +static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d) +{ + return d->size; +} + +static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr) +{ + unsigned int i; + size_t size; + + for (size = i = 0; i < d->frames; i++) { + struct bcm2835_dma_cb *control_block = + &d->control_block_base[i]; + size_t this_size = control_block->length; + dma_addr_t dma; + + if (d->dir == DMA_DEV_TO_MEM) + dma = control_block->dst; + else + dma = control_block->src; + + if (size) + size += this_size; + else if (addr >= dma && addr < dma + this_size) + size += dma + this_size - addr; + } + + return size; +} + +static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = + bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct bcm2835_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD); + else if (d->dir == DMA_DEV_TO_MEM) + pos = readl(c->chan_base + BCM2835_DMA_DEST_AD); + else + pos = 0; + + txstate->residue = bcm2835_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + + spin_unlock_irqrestore(&c->vc.lock, flags); + + return ret; +} + +static void bcm2835_dma_issue_pending(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + unsigned long flags; + + c->cyclic = true; /* Nothing else is implemented */ + + 
spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) + bcm2835_dma_start_desc(c); + + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct bcm2835_desc *d; + dma_addr_t dev_addr; + unsigned int es, sync_type; + unsigned int frame; + + /* Grab configuration */ + if (!is_slave_direction(direction)) { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (direction == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + sync_type = BCM2835_DMA_S_DREQ; + } else { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + sync_type = BCM2835_DMA_D_DREQ; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = BCM2835_DMA_DATA_TYPE_S32; + break; + default: + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d), GFP_NOWAIT); + if (!d) + return NULL; + + d->dir = direction; + d->frames = buf_len / period_len; + + /* Allocate memory for control blocks */ + d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); + d->control_block_base = dma_zalloc_coherent(chan->device->dev, + d->control_block_size, &d->control_block_base_phys, + GFP_NOWAIT); + + if (!d->control_block_base) { + kfree(d); + return NULL; + } + + /* + * Iterate over all frames, create a control block + * for each frame and link them together. + */ + for (frame = 0; frame < d->frames; frame++) { + struct bcm2835_dma_cb *control_block = + &d->control_block_base[frame]; + + /* Setup adresses */ + if (d->dir == DMA_DEV_TO_MEM) { + control_block->info = BCM2835_DMA_D_INC; + control_block->src = dev_addr; + control_block->dst = buf_addr + frame * period_len; + } else { + control_block->info = BCM2835_DMA_S_INC; + control_block->src = buf_addr + frame * period_len; + control_block->dst = dev_addr; + } + + /* Enable interrupt */ + control_block->info |= BCM2835_DMA_INT_EN; + + /* Setup synchronization */ + if (sync_type != 0) + control_block->info |= sync_type; + + /* Setup DREQ channel */ + if (c->dreq != 0) + control_block->info |= + BCM2835_DMA_PER_MAP(c->dreq); + + /* Length of a frame */ + control_block->length = period_len; + d->size += control_block->length; + + /* + * Next block is the next frame. + * This DMA engine driver currently only supports cyclic DMA. + * Therefore, wrap around at number of frames. 
+ */ + control_block->next = d->control_block_base_phys + + sizeof(struct bcm2835_dma_cb) + * ((frame + 1) % d->frames); + } + + return vchan_tx_prep(&c->vc, &d->vd, flags); +} + +static int bcm2835_dma_slave_config(struct bcm2835_chan *c, + struct dma_slave_config *cfg) +{ + if ((cfg->direction == DMA_DEV_TO_MEM && + cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || + (cfg->direction == DMA_MEM_TO_DEV && + cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || + !is_slave_direction(cfg->direction)) { + return -EINVAL; + } + + c->cfg = *cfg; + + return 0; +} + +static int bcm2835_dma_terminate_all(struct bcm2835_chan *c) +{ + struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); + unsigned long flags; + int timeout = 10000; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* + * Stop DMA activity: we assume the callback will not be called + * after bcm_dma_abort() returns (even if it does, it will see + * c->desc is NULL and exit.) + */ + if (c->desc) { + c->desc = NULL; + bcm2835_dma_abort(c->chan_base); + + /* Wait for stopping */ + while (--timeout) { + if (!(readl(c->chan_base + BCM2835_DMA_CS) & + BCM2835_DMA_ACTIVE)) + break; + + cpu_relax(); + } + + if (!timeout) + dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); + } + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + + switch (cmd) { + case DMA_SLAVE_CONFIG: + return bcm2835_dma_slave_config(c, + (struct dma_slave_config *)arg); + + case DMA_TERMINATE_ALL: + return bcm2835_dma_terminate_all(c); + + default: + return -ENXIO; + } +} + +static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) +{ + struct bcm2835_chan *c; + + c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->vc.desc_free = bcm2835_dma_desc_free; + vchan_init(&c->vc, &d->ddev); + INIT_LIST_HEAD(&c->node); + + d->ddev.chancnt++; + + c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); + c->ch = chan_id; + c->irq_number = irq; + + return 0; +} + +static void bcm2835_dma_free(struct bcm2835_dmadev *od) +{ + struct bcm2835_chan *c, *next; + + list_for_each_entry_safe(c, next, &od->ddev.channels, + vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + } +} + +static const struct of_device_id bcm2835_dma_of_match[] = { + { .compatible = "brcm,bcm2835-dma", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match); + +static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec, + struct of_dma *ofdma) +{ + struct bcm2835_dmadev *d = ofdma->of_dma_data; + struct dma_chan *chan; + + chan = dma_get_any_slave_channel(&d->ddev); + if (!chan) + return NULL; + + /* Set DREQ from param */ + to_bcm2835_dma_chan(chan)->dreq = spec->args[0]; + + return chan; +} + +static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = false; + caps->cmd_terminate = true; + + return 0; +} + +static int bcm2835_dma_probe(struct 
platform_device *pdev) +{ + struct bcm2835_dmadev *od; + struct resource *res; + void __iomem *base; + int rc; + int i; + int irq; + uint32_t chans_available; + + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + pdev->dev.dma_parms = &od->dma_parms; + dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + od->base = base; + + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; + od->ddev.device_tx_status = bcm2835_dma_tx_status; + od->ddev.device_issue_pending = bcm2835_dma_issue_pending; + od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps; + od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; + od->ddev.device_control = bcm2835_dma_control; + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + spin_lock_init(&od->lock); + + platform_set_drvdata(pdev, od); + + /* Request DMA channel mask from device tree */ + if (of_property_read_u32(pdev->dev.of_node, + "brcm,dma-channel-mask", + &chans_available)) { + dev_err(&pdev->dev, "Failed to get channel mask\n"); + rc = -EINVAL; + goto err_no_dma; + } + + /* + * Do not use the FIQ and BULK channels, + * because they are used by the GPU. + */ + chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK); + + for (i = 0; i < pdev->num_resources; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) + break; + + if (chans_available & (1 << i)) { + rc = bcm2835_dma_chan_init(od, i, irq); + if (rc) + goto err_no_dma; + } + } + + dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); + + /* Device-tree DMA controller registration */ + rc = of_dma_controller_register(pdev->dev.of_node, + bcm2835_dma_xlate, od); + if (rc) { + dev_err(&pdev->dev, "Failed to register DMA controller\n"); + goto err_no_dma; + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + dev_err(&pdev->dev, + "Failed to register slave DMA engine device: %d\n", rc); + goto err_no_dma; + } + + dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n"); + + return 0; + +err_no_dma: + bcm2835_dma_free(od); + return rc; +} + +static int bcm2835_dma_remove(struct platform_device *pdev) +{ + struct bcm2835_dmadev *od = platform_get_drvdata(pdev); + + dma_async_device_unregister(&od->ddev); + bcm2835_dma_free(od); + + return 0; +} + +static struct platform_driver bcm2835_dma_driver = { + .probe = bcm2835_dma_probe, + .remove = bcm2835_dma_remove, + .driver = { + .name = "bcm2835-dma", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(bcm2835_dma_of_match), + }, +}; + +module_platform_driver(bcm2835_dma_driver); + +MODULE_ALIAS("platform:bcm2835-dma"); +MODULE_DESCRIPTION("BCM2835 DMA engine driver"); +MODULE_AUTHOR("Florian Meier "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 3b0f4a54f247b2b5f2523fab0e6243f76ac80d9f Mon Sep 17 00:00:00 2001 From: Nenghua Cao Date: Fri, 13 Dec 2013 16:14:31 +0800 Subject: dma:mmp_tdma: get sram pool through device tree Support to get sram pool from generic device tree binding. The existing way of get sram poll, directly call sram_get_gpool(), still work here. 
Signed-off-by: Nenghua Cao Acked-by: Zhangfei Gao Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index d4b730ce0369..33f96aaa80c7 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -126,6 +126,8 @@ struct mmp_tdma_chan { size_t buf_len; size_t period_len; size_t pos; + + struct gen_pool *pool; }; #define TDMA_CHANNEL_NUM 2 @@ -324,7 +326,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) struct gen_pool *gpool; int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); - gpool = sram_get_gpool("asram"); + gpool = tdmac->pool; if (tdmac->desc_arr) gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, size); @@ -374,7 +376,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) struct gen_pool *gpool; int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); - gpool = sram_get_gpool("asram"); + gpool = tdmac->pool; if (!gpool) return NULL; @@ -505,7 +507,8 @@ static int mmp_tdma_remove(struct platform_device *pdev) } static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, - int idx, int irq, int type) + int idx, int irq, + int type, struct gen_pool *pool) { struct mmp_tdma_chan *tdmac; @@ -527,6 +530,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, tdmac->idx = idx; tdmac->type = type; tdmac->reg_base = tdev->base + idx * 4; + tdmac->pool = pool; tdmac->status = DMA_COMPLETE; tdev->tdmac[tdmac->idx] = tdmac; tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); @@ -553,6 +557,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) int i, ret; int irq = 0, irq_num = 0; int chan_num = TDMA_CHANNEL_NUM; + struct gen_pool *pool; of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); if (of_id) @@ -579,6 +584,15 @@ static int mmp_tdma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&tdev->device.channels); + if (pdev->dev.of_node) + pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0); + else + pool = sram_get_gpool("asram"); + if (!pool) { + dev_err(&pdev->dev, "asram pool not available\n"); + return -ENOMEM; + } + if (irq_num != chan_num) { irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, @@ -590,7 +604,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) /* initialize channel parameters */ for (i = 0; i < chan_num; i++) { irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); - ret = mmp_tdma_chan_init(tdev, i, irq, type); + ret = mmp_tdma_chan_init(tdev, i, irq, type, pool); if (ret) return ret; } -- cgit v1.2.3 From 61310a3659befaad3edfababcf7b4352c1df2b3e Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Mon, 30 Dec 2013 10:46:31 +0530 Subject: dma: Cleanup dma-mmp_tdma.h header Commit 293b2da1b611 ("ARM: pxa: move platform_data definitions") moved the file to the current location but forgot to remove the pointer to its previous location. Clean it up. While at it also change the header file protection macros appropriately. 
Signed-off-by: Sachin Kamat Signed-off-by: Vinod Koul --- include/linux/platform_data/dma-mmp_tdma.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h index 239e0fc1bb1f..66574ea39f97 100644 --- a/include/linux/platform_data/dma-mmp_tdma.h +++ b/include/linux/platform_data/dma-mmp_tdma.h @@ -1,6 +1,4 @@ /* - * linux/arch/arm/mach-mmp/include/mach/sram.h - * * SRAM Memory Management * * Copyright (c) 2011 Marvell Semiconductors Inc. @@ -11,8 +9,8 @@ * */ -#ifndef __ASM_ARCH_SRAM_H -#define __ASM_ARCH_SRAM_H +#ifndef __DMA_MMP_TDMA_H +#define __DMA_MMP_TDMA_H #include @@ -32,4 +30,4 @@ struct sram_platdata { extern struct gen_pool *sram_get_gpool(char *pool_name); -#endif /* __ASM_ARCH_SRAM_H */ +#endif /* __DMA_MMP_TDMA_H */ -- cgit v1.2.3 From 6fb9063ca2237956e80135700275cdc471ce7233 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Mon, 30 Dec 2013 10:46:32 +0530 Subject: dma: mv_xor: Cleanup in dma-mv_xor.h header Commit c02cecb92ed4 ("ARM: orion: move platform_data definitions") moved the file to the current location but forgot to remove the pointer to its previous location. Clean it up. While at it also change the header file protection macros appropriately. Signed-off-by: Sachin Kamat Cc: Thomas Petazzoni Signed-off-by: Vinod Koul --- include/linux/platform_data/dma-mv_xor.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h index 8ec18f64e396..92ffd3245f76 100644 --- a/include/linux/platform_data/dma-mv_xor.h +++ b/include/linux/platform_data/dma-mv_xor.h @@ -1,11 +1,9 @@ /* - * arch/arm/plat-orion/include/plat/mv_xor.h - * * Marvell XOR platform device data definition file. */ -#ifndef __PLAT_MV_XOR_H -#define __PLAT_MV_XOR_H +#ifndef __DMA_MV_XOR_H +#define __DMA_MV_XOR_H #include #include -- cgit v1.2.3 From e5b2886145bb0c3d7949952bd618529f8bea4cf7 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 8 Jan 2014 16:45:55 +0800 Subject: Revert "ARM: dts: imx: use dual-fifo sdma script for ssi" This reverts commit b1d27c79c8377df1880447375deffa3bb82c7bd3. Previously we switched the SSI scriprt to dual-fifo mode to reduce playback underrun issue, which is only included by SDMA firmware version 2. However, there are quite a lot people still using version 1 or default firmware in the ROM code of SoC while these two kinds of firmwares do not support the dual-fifo script and the audio function on their platform would be broken. Thus this patch provisionally reverts the dual-fifo script to the original single fifo script to meet all kinds of users' requirements, including the version 1/2 or inner ROM firmware. 
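For context, the "rx"/"tx" entries in the dmas/dma-names properties changed below are what the SSI sound driver passes to the dmaengine core when requesting its channels; a minimal consumer-side sketch (generic dmaengine usage, not part of this revert):

	struct dma_chan *tx = dma_request_slave_channel(dev, "tx");
	struct dma_chan *rx = dma_request_slave_channel(dev, "rx");

	if (!tx || !rx)
		return -ENODEV;	/* illustrative error handling */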
Reported-by: Fabio Estevam Signed-off-by: Nicolin Chen Tested-by: Fabio Estevam Acked-by: Shawn Guo Signed-off-by: Vinod Koul --- arch/arm/boot/dts/imx51.dtsi | 4 ++-- arch/arm/boot/dts/imx53.dtsi | 4 ++-- arch/arm/boot/dts/imx6qdl.dtsi | 12 ++++++------ arch/arm/boot/dts/imx6sl.dtsi | 12 ++++++------ 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 5be28377d41f..4bcdd3ad15e5 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -159,8 +159,8 @@ reg = <0x70014000 0x4000>; interrupts = <30>; clocks = <&clks 49>; - dmas = <&sdma 24 22 0>, - <&sdma 25 22 0>; + dmas = <&sdma 24 1 0>, + <&sdma 25 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */ diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 7208fde9bc16..4307e80b2d2e 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -153,8 +153,8 @@ reg = <0x50014000 0x4000>; interrupts = <30>; clocks = <&clks 49>; - dmas = <&sdma 24 22 0>, - <&sdma 25 22 0>; + dmas = <&sdma 24 1 0>, + <&sdma 25 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */ diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index e9534f2f53e7..fb28b2ecb1db 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -236,8 +236,8 @@ reg = <0x02028000 0x4000>; interrupts = <0 46 0x04>; clocks = <&clks 178>; - dmas = <&sdma 37 22 0>, - <&sdma 38 22 0>; + dmas = <&sdma 37 1 0>, + <&sdma 38 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <38 37>; @@ -249,8 +249,8 @@ reg = <0x0202c000 0x4000>; interrupts = <0 47 0x04>; clocks = <&clks 179>; - dmas = <&sdma 41 22 0>, - <&sdma 42 22 0>; + dmas = <&sdma 41 1 0>, + <&sdma 42 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <42 41>; @@ -262,8 +262,8 @@ reg = <0x02030000 0x4000>; interrupts = <0 48 0x04>; clocks = <&clks 180>; - dmas = <&sdma 45 22 0>, - <&sdma 46 22 0>; + dmas = <&sdma 45 1 0>, + <&sdma 46 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; fsl,ssi-dma-events = <46 45>; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 7b57fecd0bd5..28558f1aaf2d 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -199,8 +199,8 @@ reg = <0x02028000 0x4000>; interrupts = <0 46 0x04>; clocks = <&clks IMX6SL_CLK_SSI1>; - dmas = <&sdma 37 22 0>, - <&sdma 38 22 0>; + dmas = <&sdma 37 1 0>, + <&sdma 38 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; status = "disabled"; @@ -211,8 +211,8 @@ reg = <0x0202c000 0x4000>; interrupts = <0 47 0x04>; clocks = <&clks IMX6SL_CLK_SSI2>; - dmas = <&sdma 41 22 0>, - <&sdma 42 22 0>; + dmas = <&sdma 41 1 0>, + <&sdma 42 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; status = "disabled"; @@ -223,8 +223,8 @@ reg = <0x02030000 0x4000>; interrupts = <0 48 0x04>; clocks = <&clks IMX6SL_CLK_SSI3>; - dmas = <&sdma 45 22 0>, - <&sdma 46 22 0>; + dmas = <&sdma 45 1 0>, + <&sdma 46 1 0>; dma-names = "rx", "tx"; fsl,fifo-depth = <15>; status = "disabled"; -- cgit v1.2.3 From 70dabaede806e12881a527ef9460b293ec15af59 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 8 Jan 2014 16:45:56 +0800 Subject: dma: imx-sdma: Assign a default script number for ROM firmware cases i.MX series have inner firmware in its ROM code: when SDMA isn't provided any firmware from Kernel or 
rootfs, the default inner ROM firmware will be activated. However the current driver doesn't assign any script number to this situation, and those platform running in this case would be broken. Thus this patch adds a default script number when no external firmware being loaded so that people would continue to be able to use basic scripts to run their platform without any firmware. Reported-by: Fabio Estevam Signed-off-by: Nicolin Chen Tested-by: Fabio Estevam Acked-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 152247675feb..4e7918339b12 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1252,6 +1252,10 @@ static void sdma_add_scripts(struct sdma_engine *sdma, s32 *saddr_arr = (u32 *)sdma->script_addrs; int i; + /* use the default firmware in ROM if missing external firmware */ + if (!sdma->script_number) + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; + for (i = 0; i < sdma->script_number; i++) if (addr_arr[i] > 0) saddr_arr[i] = addr_arr[i]; -- cgit v1.2.3 From 7f5ae3553685d54413dda4fc3c98f46056ea716b Mon Sep 17 00:00:00 2001 From: Florian Meier Date: Fri, 17 Jan 2014 18:06:29 +0100 Subject: dmaengine: Add DMA_PRIVATE to BCM2835 driver Without DMA_PRIVATE the driver is not able to allocate more than one channel. Since it uses dma_get_any_slave_channel that calls private_candidate, the second allocation fails at /* some channels are already publicly allocated */ Maybe it should be fixed in the core, but at least this fixes the bug. Signed-off-by: Florian Meier Signed-off-by: Vinod Koul --- drivers/dma/bcm2835-dma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 6ae070825f3f..a03602164e3e 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -611,6 +611,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev) od->base = base; dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; -- cgit v1.2.3 From 5f9e685a0d463666af080250b2ece11bc81acacd Mon Sep 17 00:00:00 2001 From: Jonas Jensen Date: Fri, 17 Jan 2014 09:46:05 +0100 Subject: dmaengine: Add MOXA ART DMA engine driver The MOXA ART SoC has a DMA controller capable of offloading expensive memory operations, such as large copies. This patch adds support for the controller including four channels. Two of these are used to handle MMC copy on the UC-7112-LX hardware. The remaining two can be used in a future audio driver or client application. 
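As a rough illustration of how a client such as the MMC driver consumes one of these channels through the standard dmaengine slave API (the FIFO address and channel name below are made up, not taken from this patch):

	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0x98e00020,	/* hypothetical MMC data FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		return -ENODEV;
	dmaengine_slave_config(chan, &cfg);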
Signed-off-by: Jonas Jensen Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/moxa,moxart-dma.txt | 45 ++ drivers/dma/Kconfig | 8 + drivers/dma/Makefile | 1 + drivers/dma/moxart-dma.c | 699 +++++++++++++++++++++ 4 files changed, 753 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt create mode 100644 drivers/dma/moxart-dma.c diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt new file mode 100644 index 000000000000..8a9f3559335b --- /dev/null +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt @@ -0,0 +1,45 @@ +MOXA ART DMA Controller + +See dma.txt first + +Required properties: + +- compatible : Must be "moxa,moxart-dma" +- reg : Should contain registers location and length +- interrupts : Should contain an interrupt-specifier for the sole + interrupt generated by the device +- #dma-cells : Should be 1, a single cell holding a line request number + +Example: + + dma: dma@90500000 { + compatible = "moxa,moxart-dma"; + reg = <0x90500080 0x40>; + interrupts = <24 0>; + #dma-cells = <1>; + }; + + +Clients: + +DMA clients connected to the MOXA ART DMA controller must use the format +described in the dma.txt file, using a two-cell specifier for each channel: +a phandle plus one integer cells. +The two cells in order are: + +1. A phandle pointing to the DMA controller. +2. Peripheral identifier for the hardware handshaking interface. + +Example: +Use specific request line passing from dma +For example, MMC request line is 5 + + sdhci: sdhci@98e00000 { + compatible = "moxa,moxart-sdhci"; + reg = <0x98e00000 0x5C>; + interrupts = <5 0>; + clocks = <&clk_apb>; + dmas = <&dma 5>, + <&dma 5>; + dma-names = "tx", "rx"; + }; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index cff5f1e39cf7..0362f4ce61cc 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -337,6 +337,14 @@ config K3_DMA Support the DMA engine for Hisilicon K3 platform devices. +config MOXART_DMA + tristate "MOXART DMA support" + depends on ARCH_MOXART + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for the MOXA ART SoC DMA controller. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 0a6f08edf2c8..a029d0f4a1be 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -43,3 +43,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o obj-$(CONFIG_TI_CPPI41) += cppi41.o obj-$(CONFIG_K3_DMA) += k3dma.o +obj-$(CONFIG_MOXART_DMA) += moxart-dma.o diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c new file mode 100644 index 000000000000..3258e484e4f6 --- /dev/null +++ b/drivers/dma/moxart-dma.c @@ -0,0 +1,699 @@ +/* + * MOXA ART SoCs DMA Engine support. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "dmaengine.h" +#include "virt-dma.h" + +#define APB_DMA_MAX_CHANNEL 4 + +#define REG_OFF_ADDRESS_SOURCE 0 +#define REG_OFF_ADDRESS_DEST 4 +#define REG_OFF_CYCLES 8 +#define REG_OFF_CTRL 12 +#define REG_OFF_CHAN_SIZE 16 + +#define APB_DMA_ENABLE BIT(0) +#define APB_DMA_FIN_INT_STS BIT(1) +#define APB_DMA_FIN_INT_EN BIT(2) +#define APB_DMA_BURST_MODE BIT(3) +#define APB_DMA_ERR_INT_STS BIT(4) +#define APB_DMA_ERR_INT_EN BIT(5) + +/* + * Unset: APB + * Set: AHB + */ +#define APB_DMA_SOURCE_SELECT 0x40 +#define APB_DMA_DEST_SELECT 0x80 + +#define APB_DMA_SOURCE 0x100 +#define APB_DMA_DEST 0x1000 + +#define APB_DMA_SOURCE_MASK 0x700 +#define APB_DMA_DEST_MASK 0x7000 + +/* + * 000: No increment + * 001: +1 (Burst=0), +4 (Burst=1) + * 010: +2 (Burst=0), +8 (Burst=1) + * 011: +4 (Burst=0), +16 (Burst=1) + * 101: -1 (Burst=0), -4 (Burst=1) + * 110: -2 (Burst=0), -8 (Burst=1) + * 111: -4 (Burst=0), -16 (Burst=1) + */ +#define APB_DMA_SOURCE_INC_0 0 +#define APB_DMA_SOURCE_INC_1_4 0x100 +#define APB_DMA_SOURCE_INC_2_8 0x200 +#define APB_DMA_SOURCE_INC_4_16 0x300 +#define APB_DMA_SOURCE_DEC_1_4 0x500 +#define APB_DMA_SOURCE_DEC_2_8 0x600 +#define APB_DMA_SOURCE_DEC_4_16 0x700 +#define APB_DMA_DEST_INC_0 0 +#define APB_DMA_DEST_INC_1_4 0x1000 +#define APB_DMA_DEST_INC_2_8 0x2000 +#define APB_DMA_DEST_INC_4_16 0x3000 +#define APB_DMA_DEST_DEC_1_4 0x5000 +#define APB_DMA_DEST_DEC_2_8 0x6000 +#define APB_DMA_DEST_DEC_4_16 0x7000 + +/* + * Request signal select source/destination address for DMA hardware handshake. + * + * The request line number is a property of the DMA controller itself, + * e.g. MMC must always request channels where dma_slave_config->slave_id is 5. 
+ * + * 0: No request / Grant signal + * 1-15: Request / Grant signal + */ +#define APB_DMA_SOURCE_REQ_NO 0x1000000 +#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000 +#define APB_DMA_DEST_REQ_NO 0x10000 +#define APB_DMA_DEST_REQ_NO_MASK 0xf0000 + +#define APB_DMA_DATA_WIDTH 0x100000 +#define APB_DMA_DATA_WIDTH_MASK 0x300000 +/* + * Data width of transfer: + * + * 00: Word + * 01: Half + * 10: Byte + */ +#define APB_DMA_DATA_WIDTH_4 0 +#define APB_DMA_DATA_WIDTH_2 0x100000 +#define APB_DMA_DATA_WIDTH_1 0x200000 + +#define APB_DMA_CYCLES_MASK 0x00ffffff + +#define MOXART_DMA_DATA_TYPE_S8 0x00 +#define MOXART_DMA_DATA_TYPE_S16 0x01 +#define MOXART_DMA_DATA_TYPE_S32 0x02 + +struct moxart_sg { + dma_addr_t addr; + uint32_t len; +}; + +struct moxart_desc { + enum dma_transfer_direction dma_dir; + dma_addr_t dev_addr; + unsigned int sglen; + unsigned int dma_cycles; + struct virt_dma_desc vd; + uint8_t es; + struct moxart_sg sg[0]; +}; + +struct moxart_chan { + struct virt_dma_chan vc; + + void __iomem *base; + struct moxart_desc *desc; + + struct dma_slave_config cfg; + + bool allocated; + bool error; + int ch_num; + unsigned int line_reqno; + unsigned int sgidx; +}; + +struct moxart_dmadev { + struct dma_device dma_slave; + struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL]; +}; + +struct moxart_filter_data { + struct moxart_dmadev *mdc; + struct of_phandle_args *dma_spec; +}; + +static const unsigned int es_bytes[] = { + [MOXART_DMA_DATA_TYPE_S8] = 1, + [MOXART_DMA_DATA_TYPE_S16] = 2, + [MOXART_DMA_DATA_TYPE_S32] = 4, +}; + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct moxart_chan, vc.chan); +} + +static inline struct moxart_desc *to_moxart_dma_desc( + struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct moxart_desc, vd.tx); +} + +static void moxart_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct moxart_desc, vd)); +} + +static int moxart_terminate_all(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + u32 ctrl; + + dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch); + + spin_lock_irqsave(&ch->vc.lock, flags); + + if (ch->desc) + ch->desc = NULL; + + ctrl = readl(ch->base + REG_OFF_CTRL); + ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); + writel(ctrl, ch->base + REG_OFF_CTRL); + + vchan_get_all_descriptors(&ch->vc, &head); + spin_unlock_irqrestore(&ch->vc.lock, flags); + vchan_dma_desc_free_list(&ch->vc, &head); + + return 0; +} + +static int moxart_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + u32 ctrl; + + ch->cfg = *cfg; + + ctrl = readl(ch->base + REG_OFF_CTRL); + ctrl |= APB_DMA_BURST_MODE; + ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK); + ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK); + + switch (ch->cfg.src_addr_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + ctrl |= APB_DMA_DATA_WIDTH_1; + if (ch->cfg.direction != DMA_MEM_TO_DEV) + ctrl |= APB_DMA_DEST_INC_1_4; + else + ctrl |= APB_DMA_SOURCE_INC_1_4; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + ctrl |= APB_DMA_DATA_WIDTH_2; + if (ch->cfg.direction != DMA_MEM_TO_DEV) + ctrl |= APB_DMA_DEST_INC_2_8; + else + ctrl |= APB_DMA_SOURCE_INC_2_8; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + ctrl &= ~APB_DMA_DATA_WIDTH; + if (ch->cfg.direction != 
DMA_MEM_TO_DEV) + ctrl |= APB_DMA_DEST_INC_4_16; + else + ctrl |= APB_DMA_SOURCE_INC_4_16; + break; + default: + return -EINVAL; + } + + if (ch->cfg.direction == DMA_MEM_TO_DEV) { + ctrl &= ~APB_DMA_DEST_SELECT; + ctrl |= APB_DMA_SOURCE_SELECT; + ctrl |= (ch->line_reqno << 16 & + APB_DMA_DEST_REQ_NO_MASK); + } else { + ctrl |= APB_DMA_DEST_SELECT; + ctrl &= ~APB_DMA_SOURCE_SELECT; + ctrl |= (ch->line_reqno << 24 & + APB_DMA_SOURCE_REQ_NO_MASK); + } + + writel(ctrl, ch->base + REG_OFF_CTRL); + + return 0; +} + +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + int ret = 0; + + switch (cmd) { + case DMA_PAUSE: + case DMA_RESUME: + return -EINVAL; + case DMA_TERMINATE_ALL: + moxart_terminate_all(chan); + break; + case DMA_SLAVE_CONFIG: + ret = moxart_slave_config(chan, (struct dma_slave_config *)arg); + break; + default: + ret = -ENOSYS; + } + + return ret; +} + +static struct dma_async_tx_descriptor *moxart_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + struct moxart_desc *d; + enum dma_slave_buswidth dev_width; + dma_addr_t dev_addr; + struct scatterlist *sgent; + unsigned int es; + unsigned int i; + + if (!is_slave_direction(dir)) { + dev_err(chan2dev(chan), "%s: invalid DMA direction\n", + __func__); + return NULL; + } + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = ch->cfg.src_addr; + dev_width = ch->cfg.src_addr_width; + } else { + dev_addr = ch->cfg.dst_addr; + dev_width = ch->cfg.dst_addr_width; + } + + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = MOXART_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = MOXART_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = MOXART_DMA_DATA_TYPE_S32; + break; + default: + dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n", + __func__, dev_width); + return NULL; + } + + d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dma_dir = dir; + d->dev_addr = dev_addr; + d->es = es; + + for_each_sg(sgl, sgent, sg_len, i) { + d->sg[i].addr = sg_dma_address(sgent); + d->sg[i].len = sg_dma_len(sgent); + } + + d->sglen = sg_len; + + ch->error = 0; + + return vchan_tx_prep(&ch->vc, &d->vd, tx_flags); +} + +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct moxart_dmadev *mdc = ofdma->of_dma_data; + struct dma_chan *chan; + struct moxart_chan *ch; + + chan = dma_get_any_slave_channel(&mdc->dma_slave); + if (!chan) + return NULL; + + ch = to_moxart_dma_chan(chan); + ch->line_reqno = dma_spec->args[0]; + + return chan; +} + +static int moxart_alloc_chan_resources(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + + dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n", + __func__, ch->ch_num); + ch->allocated = 1; + + return 0; +} + +static void moxart_free_chan_resources(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + + vchan_free_chan_resources(&ch->vc); + + dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n", + __func__, ch->ch_num); + ch->allocated = 0; +} + +static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr, + dma_addr_t dst_addr) +{ + writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE); + writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST); +} + +static void moxart_set_transfer_params(struct 
moxart_chan *ch, unsigned int len) +{ + struct moxart_desc *d = ch->desc; + unsigned int sglen_div = es_bytes[d->es]; + + d->dma_cycles = len >> sglen_div; + + /* + * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16 + * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ). + */ + writel(d->dma_cycles, ch->base + REG_OFF_CYCLES); + + dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n", + __func__, d->dma_cycles, len); +} + +static void moxart_start_dma(struct moxart_chan *ch) +{ + u32 ctrl; + + ctrl = readl(ch->base + REG_OFF_CTRL); + ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); + writel(ctrl, ch->base + REG_OFF_CTRL); +} + +static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx) +{ + struct moxart_desc *d = ch->desc; + struct moxart_sg *sg = ch->desc->sg + idx; + + if (ch->desc->dma_dir == DMA_MEM_TO_DEV) + moxart_dma_set_params(ch, sg->addr, d->dev_addr); + else if (ch->desc->dma_dir == DMA_DEV_TO_MEM) + moxart_dma_set_params(ch, d->dev_addr, sg->addr); + + moxart_set_transfer_params(ch, sg->len); + + moxart_start_dma(ch); +} + +static void moxart_dma_start_desc(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&ch->vc); + + if (!vd) { + ch->desc = NULL; + return; + } + + list_del(&vd->node); + + ch->desc = to_moxart_dma_desc(&vd->tx); + ch->sgidx = 0; + + moxart_dma_start_sg(ch, 0); +} + +static void moxart_issue_pending(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&ch->vc.lock, flags); + if (vchan_issue_pending(&ch->vc) && !ch->desc) + moxart_dma_start_desc(chan); + spin_unlock_irqrestore(&ch->vc.lock, flags); +} + +static size_t moxart_dma_desc_size(struct moxart_desc *d, + unsigned int completed_sgs) +{ + unsigned int i; + size_t size; + + for (size = i = completed_sgs; i < d->sglen; i++) + size += d->sg[i].len; + + return size; +} + +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch) +{ + size_t size; + unsigned int completed_cycles, cycles; + + size = moxart_dma_desc_size(ch->desc, ch->sgidx); + cycles = readl(ch->base + REG_OFF_CYCLES); + completed_cycles = (ch->desc->dma_cycles - cycles); + size -= completed_cycles << es_bytes[ch->desc->es]; + + dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size); + + return size; +} + +static enum dma_status moxart_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + struct virt_dma_desc *vd; + struct moxart_desc *d; + enum dma_status ret; + unsigned long flags; + + /* + * dma_cookie_status() assigns initial residue value. 
+ */ + ret = dma_cookie_status(chan, cookie, txstate); + + spin_lock_irqsave(&ch->vc.lock, flags); + vd = vchan_find_desc(&ch->vc, cookie); + if (vd) { + d = to_moxart_dma_desc(&vd->tx); + txstate->residue = moxart_dma_desc_size(d, 0); + } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) { + txstate->residue = moxart_dma_desc_size_in_flight(ch); + } + spin_unlock_irqrestore(&ch->vc.lock, flags); + + if (ch->error) + return DMA_ERROR; + + return ret; +} + +static void moxart_dma_init(struct dma_device *dma, struct device *dev) +{ + dma->device_prep_slave_sg = moxart_prep_slave_sg; + dma->device_alloc_chan_resources = moxart_alloc_chan_resources; + dma->device_free_chan_resources = moxart_free_chan_resources; + dma->device_issue_pending = moxart_issue_pending; + dma->device_tx_status = moxart_tx_status; + dma->device_control = moxart_control; + dma->dev = dev; + + INIT_LIST_HEAD(&dma->channels); +} + +static irqreturn_t moxart_dma_interrupt(int irq, void *devid) +{ + struct moxart_dmadev *mc = devid; + struct moxart_chan *ch = &mc->slave_chans[0]; + unsigned int i; + unsigned long flags; + u32 ctrl; + + dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__); + + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { + if (!ch->allocated) + continue; + + ctrl = readl(ch->base + REG_OFF_CTRL); + + dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n", + __func__, ch, ch->base, ctrl); + + if (ctrl & APB_DMA_FIN_INT_STS) { + ctrl &= ~APB_DMA_FIN_INT_STS; + if (ch->desc) { + spin_lock_irqsave(&ch->vc.lock, flags); + if (++ch->sgidx < ch->desc->sglen) { + moxart_dma_start_sg(ch, ch->sgidx); + } else { + vchan_cookie_complete(&ch->desc->vd); + moxart_dma_start_desc(&ch->vc.chan); + } + spin_unlock_irqrestore(&ch->vc.lock, flags); + } + } + + if (ctrl & APB_DMA_ERR_INT_STS) { + ctrl &= ~APB_DMA_ERR_INT_STS; + ch->error = 1; + } + + writel(ctrl, ch->base + REG_OFF_CTRL); + } + + return IRQ_HANDLED; +} + +static int moxart_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + static void __iomem *dma_base_addr; + int ret, i; + unsigned int irq; + struct moxart_chan *ch; + struct moxart_dmadev *mdc; + + mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL); + if (!mdc) { + dev_err(dev, "can't allocate DMA container\n"); + return -ENOMEM; + } + + irq = irq_of_parse_and_map(node, 0); + if (irq == NO_IRQ) { + dev_err(dev, "no IRQ resource\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dma_base_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(dma_base_addr)) + return PTR_ERR(dma_base_addr); + + dma_cap_zero(mdc->dma_slave.cap_mask); + dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask); + dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask); + + moxart_dma_init(&mdc->dma_slave, dev); + + ch = &mdc->slave_chans[0]; + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { + ch->ch_num = i; + ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE; + ch->allocated = 0; + + ch->vc.desc_free = moxart_dma_desc_free; + vchan_init(&ch->vc, &mdc->dma_slave); + + dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n", + __func__, i, ch->ch_num, ch->base); + } + + platform_set_drvdata(pdev, mdc); + + ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0, + "moxart-dma-engine", mdc); + if (ret) { + dev_err(dev, "devm_request_irq failed\n"); + return ret; + } + + ret = dma_async_device_register(&mdc->dma_slave); + if (ret) { + dev_err(dev, "dma_async_device_register failed\n"); + return ret; + } + + 
ret = of_dma_controller_register(node, moxart_of_xlate, mdc); + if (ret) { + dev_err(dev, "of_dma_controller_register failed\n"); + dma_async_device_unregister(&mdc->dma_slave); + return ret; + } + + dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq); + + return 0; +} + +static int moxart_remove(struct platform_device *pdev) +{ + struct moxart_dmadev *m = platform_get_drvdata(pdev); + + dma_async_device_unregister(&m->dma_slave); + + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + + return 0; +} + +static const struct of_device_id moxart_dma_match[] = { + { .compatible = "moxa,moxart-dma" }, + { } +}; + +static struct platform_driver moxart_driver = { + .probe = moxart_probe, + .remove = moxart_remove, + .driver = { + .name = "moxart-dma-engine", + .owner = THIS_MODULE, + .of_match_table = moxart_dma_match, + }, +}; + +static int moxart_init(void) +{ + return platform_driver_register(&moxart_driver); +} +subsys_initcall(moxart_init); + +static void __exit moxart_exit(void) +{ + platform_driver_unregister(&moxart_driver); +} +module_exit(moxart_exit); + +MODULE_AUTHOR("Jonas Jensen "); +MODULE_DESCRIPTION("MOXART DMA engine driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 0ca583a239a854fd403bf8b659cdff8c603372c9 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Thu, 16 Jan 2014 14:10:53 +0800 Subject: DMA: Freescale: change BWC from 256 bytes to 1024 bytes Freescale DMA has a feature of BandWidth Control (ab. BWC), which is currently 256 bytes and should be changed to 1024 bytes for best DMA throughput. Changing BWC from 256 to 1024 will improve DMA performance much, in cases whatever one channel is running or multi channels are running simultanously, large or small buffers are copied. And this change doesn't impact memory access performance remarkably, lmbench tests show that for some cases the memory performance are decreased very slightly, while the others are even better. Tested on T4240. Signed-off-by: Hongbo Zhang Signed-off-by: Vinod Koul --- drivers/dma/fsldma.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 1ffc24484d23..d56e83599825 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -41,7 +41,7 @@ * channel is allowed to transfer before the DMA engine pauses * the current channel and switches to the next channel */ -#define FSL_DMA_MR_BWC 0x08000000 +#define FSL_DMA_MR_BWC 0x0A000000 /* Special MR definition for MPC8349 */ #define FSL_DMA_MR_EOTIE 0x00000080 -- cgit v1.2.3 From ba07d812f58c0ec65fff981a085529ed88965d23 Mon Sep 17 00:00:00 2001 From: Rongjun Ying Date: Mon, 23 Dec 2013 20:19:21 +0800 Subject: dmaengine: sirf: Add device_slave_caps interface this patch adds device_slave_caps() callback as SiRF SoC sound drivers depend on it. 
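For context, once the callback below is wired up a client such as the SiRF sound driver can interrogate the channel before configuring it; a minimal sketch of the consumer side (illustrative only, not code from this patch):

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps) &&
	    (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		dev_dbg(dev, "channel supports 32-bit slave accesses\n");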
Signed-off-by: Rongjun Ying Signed-off-by: Barry Song Signed-off-by: Vinod Koul --- drivers/dma/sirf-dma.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 6aec3ad814d3..d4d3a3109b16 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -640,6 +640,25 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) } EXPORT_SYMBOL(sirfsoc_dma_filter_id); +#define SIRFSOC_DMA_BUSWIDTHS \ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; + caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS; + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = true; + caps->cmd_terminate = true; + + return 0; +} + static int sirfsoc_dma_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; @@ -712,6 +731,7 @@ static int sirfsoc_dma_probe(struct platform_device *op) dma->device_tx_status = sirfsoc_dma_tx_status; dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; + dma->device_slave_caps = sirfsoc_dma_device_slave_caps; INIT_LIST_HEAD(&dma->channels); dma_cap_set(DMA_SLAVE, dma->cap_mask); -- cgit v1.2.3 From 04abf5daf7df852566e5a4782d5954daa40e2542 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sat, 11 Jan 2014 20:08:38 +0100 Subject: dma: pl330: Differentiate between submitted and issued descriptors The pl330 dmaengine driver currently does not differentiate between submitted and issued descriptors. It won't start transferring a newly submitted descriptor until issue_pending() is called, but only if it is idle. If it is active and a new descriptor is submitted before it goes idle it will happily start the newly submitted descriptor once all earlier submitted descriptors have been completed. This is not a 100% correct with regards to the dmaengine interface semantics. A descriptor is not supposed to be started until the next issue_pending() call after the descriptor has been submitted. This patch adds a second per channel list that keeps track of the submitted descriptors. Once issue_pending() is called the submitted descriptors are moved to the working list and only descriptors on the working list are started. 
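To make the expected ordering concrete from the client's point of view (a generic dmaengine usage sketch, not code from this patch), nothing queued by dmaengine_submit() may start until dma_async_issue_pending() is called:

	desc = dmaengine_prep_slave_single(chan, buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	cookie = dmaengine_submit(desc);	/* queued, must not start yet */

	dma_async_issue_pending(chan);		/* only now may the transfer begin */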
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 7adaf3abffba..8e018a221f19 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -543,7 +543,9 @@ struct dma_pl330_chan { /* DMA-Engine Channel */ struct dma_chan chan; - /* List of to be xfered descriptors */ + /* List of submitted descriptors */ + struct list_head submitted_list; + /* List of issued descriptors */ struct list_head work_list; /* List of completed descriptors */ struct list_head completed_list; @@ -2388,6 +2390,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); /* Mark all desc done */ + list_for_each_entry(desc, &pch->submitted_list, node) { + desc->status = FREE; + dma_cookie_complete(&desc->txd); + } + list_for_each_entry(desc, &pch->work_list , node) { desc->status = FREE; dma_cookie_complete(&desc->txd); @@ -2398,6 +2405,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned dma_cookie_complete(&desc->txd); } + list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool); list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); @@ -2456,7 +2464,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, static void pl330_issue_pending(struct dma_chan *chan) { - pl330_tasklet((unsigned long) to_pchan(chan)); + struct dma_pl330_chan *pch = to_pchan(chan); + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + list_splice_tail_init(&pch->submitted_list, &pch->work_list); + spin_unlock_irqrestore(&pch->lock, flags); + + pl330_tasklet((unsigned long)pch); } /* @@ -2483,11 +2498,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) dma_cookie_assign(&desc->txd); - list_move_tail(&desc->node, &pch->work_list); + list_move_tail(&desc->node, &pch->submitted_list); } cookie = dma_cookie_assign(&last->txd); - list_add_tail(&last->node, &pch->work_list); + list_add_tail(&last->node, &pch->submitted_list); spin_unlock_irqrestore(&pch->lock, flags); return cookie; @@ -2979,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) else pch->chan.private = adev->dev.of_node; + INIT_LIST_HEAD(&pch->submitted_list); INIT_LIST_HEAD(&pch->work_list); INIT_LIST_HEAD(&pch->completed_list); spin_lock_init(&pch->lock); -- cgit v1.2.3 From 70cbb163de1c6de239375b967caf372a98fae935 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sat, 11 Jan 2014 20:08:39 +0100 Subject: dma: pl330: Use dma_get_slave_channel() in the of xlate callback Currently the driver uses dma_request_channel() with a custom filter function to find the requested channel. This will loop over all available channels until the one we want has been found, but we already know which channel we want to request, so we can dma_get_slave_channel(). This also makes the code a bit shorter cleaner. 
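For comparison, controllers whose specifier does not name a fixed channel (moxart and mmp_pdma elsewhere in this series) use dma_get_any_slave_channel() in their xlate callback; the pl330 specifier's single cell is the channel index itself, so the exact channel can be returned. Condensed shapes of the two approaches (illustrative):

	/* pl330: the DT cell is a channel index, return that exact channel */
	chan = dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);

	/* moxart/mmp_pdma: hand out any free channel, then record the request line */
	chan = dma_get_any_slave_channel(&mdc->dma_slave);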
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 35 ++++++++--------------------------- 1 file changed, 8 insertions(+), 27 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 8e018a221f19..5b2ba38b903f 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -589,6 +589,7 @@ struct dma_pl330_dmac { spinlock_t pool_lock; /* Peripheral channels connected to this DMAC */ + unsigned int num_peripherals; struct dma_pl330_chan *peripherals; /* keep at end */ }; @@ -611,11 +612,6 @@ struct dma_pl330_desc { struct dma_pl330_chan *pchan; }; -struct dma_pl330_filter_args { - struct dma_pl330_dmac *pdmac; - unsigned int chan_id; -}; - static inline void _callback(struct pl330_req *r, enum pl330_op_err err) { if (r && r->xfer_cb) @@ -2303,16 +2299,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err) tasklet_schedule(&pch->task); } -static bool pl330_dt_filter(struct dma_chan *chan, void *param) -{ - struct dma_pl330_filter_args *fargs = param; - - if (chan->device != &fargs->pdmac->ddma) - return false; - - return (chan->chan_id == fargs->chan_id); -} - bool pl330_filter(struct dma_chan *chan, void *param) { u8 *peri_id; @@ -2330,23 +2316,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, { int count = dma_spec->args_count; struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; - struct dma_pl330_filter_args fargs; - dma_cap_mask_t cap; - - if (!pdmac) - return NULL; + unsigned int chan_id; if (count != 1) return NULL; - fargs.pdmac = pdmac; - fargs.chan_id = dma_spec->args[0]; - - dma_cap_zero(cap); - dma_cap_set(DMA_SLAVE, cap); - dma_cap_set(DMA_CYCLIC, cap); + chan_id = dma_spec->args[0]; + if (chan_id >= pdmac->num_peripherals) + return NULL; - return dma_request_channel(cap, pl330_dt_filter, &fargs); + return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan); } static int pl330_alloc_chan_resources(struct dma_chan *chan) @@ -2980,6 +2959,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) else num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); + pdmac->num_peripherals = num_chan; + pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); if (!pdmac->peripherals) { ret = -ENOMEM; -- cgit v1.2.3 From c61177c548c53b7bf838135deb5d26713094b2c7 Mon Sep 17 00:00:00 2001 From: Zhangfei Gao Date: Tue, 14 Jan 2014 11:37:43 +0800 Subject: dmaengine: k3dma: fix sparse warnings Fix sparse warnings: drivers/dma/k3dma.c:480:20: warning: Using plain integer as NULL pointer drivers/dma/k3dma.c:820:1: warning: symbol 'k3_dma_pmops' was not declared. Should it be static? 
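For anyone reproducing such warnings, sparse runs through the normal kernel build; for example (C=2 forces the check even when the object is already up to date):

	make C=2 drivers/dma/k3dma.o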
Signed-off-by: Zhangfei Gao Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index e26075408e9b..a1f911aaf220 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -477,7 +477,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( dma_addr_t addr, src = 0, dst = 0; int num = sglen, i; - if (sgl == 0) + if (sgl == NULL) return NULL; for_each_sg(sgl, sg, sglen, i) { @@ -817,7 +817,7 @@ static int k3_dma_resume(struct device *dev) return 0; } -SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); +static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); static struct platform_driver k3_pdma_driver = { .driver = { -- cgit v1.2.3 From 1d566f11d02c8cfa65d803b847ded08febc18d2e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 13 Jan 2014 14:04:48 +0200 Subject: dma: dw: fix style of multiline comment Simple fix a style of the multiline comment. There is no functional change. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 7516be4677cf..732b6bd398d9 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -218,8 +218,10 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 ctllo; - /* Software emulation of LLP mode relies on interrupts to continue - * multi block transfer. */ + /* + * Software emulation of LLP mode relies on interrupts to continue + * multi block transfer. + */ ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; channel_writel(dwc, SAR, desc->lli.sar); @@ -1603,9 +1605,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, dwc_params); - /* Decode maximum block size for given channel. The + /* + * Decode maximum block size for given channel. The * stored 4 bit value represents blocks from 0x00 for 3 - * up to 0x0a for 4095. */ + * up to 0x0a for 4095. + */ dwc->block_size = (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; dwc->nollp = -- cgit v1.2.3 From fc61f6b4f348f8ee9e798fb9f25c45398799d2e8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 13 Jan 2014 14:04:49 +0200 Subject: dma: dw: join split up messages The joined messages are better to grep when debugging. There is no functional change. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 732b6bd398d9..1e900b1de913 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -255,8 +255,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) &dwc->flags); if (was_soft_llp) { dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start new LLP transfer " - "inside ongoing one\n"); + "BUG: Attempted to start new LLP transfer inside ongoing one\n"); return; } @@ -569,9 +568,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, unlikely(status_xfer & dwc->mask)) { int i; - dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " - "interrupt, stopping DMA transfer\n", - status_xfer ? "xfer" : "error"); + dev_err(chan2dev(&dwc->chan), + "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", + status_xfer ? 
"xfer" : "error"); spin_lock_irqsave(&dwc->lock, flags); -- cgit v1.2.3 From 5a87f0e618c709b982c1fa568a30346c38ea28de Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 13 Jan 2014 14:04:50 +0200 Subject: dma: dw: use %pad instead of casting dma_addr_t Since we have nice helper to print dma_addr_t values by reference we may use it instead of explicit casting to a longest type. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 1e900b1de913..13ac3f240e79 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -421,8 +421,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) return; } - dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, - (unsigned long long)llp); + dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { /* Initial residue value */ @@ -712,9 +711,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, u32 ctllo; dev_vdbg(chan2dev(chan), - "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, - (unsigned long long)dest, (unsigned long long)src, - len, flags); + "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, + &dest, &src, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); @@ -1402,9 +1400,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, /* Let's make a cyclic list */ last->lli.llp = cdesc->desc[0]->txd.phys; - dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " - "period %zu periods %d\n", (unsigned long long)buf_addr, - buf_len, period_len, periods); + dev_dbg(chan2dev(&dwc->chan), + "cyclic prepared buf %pad len %zu period %zu periods %d\n", + &buf_addr, buf_len, period_len, periods); cdesc->periods = periods; dwc->cdesc = cdesc; -- cgit v1.2.3 From 911daccc8b9672ec2206d3741127089dc2c695d4 Mon Sep 17 00:00:00 2001 From: Laxman Dewangan Date: Mon, 6 Jan 2014 11:16:45 -0700 Subject: dma: tegra: add support for Tegra148/124 Tegra148 introduces a few changes to the APB DMA HW registers. Update the driver to cope with them. Tegra124 inherits these changes. * The register address stride between DMA channels increases. * A new per-channel WCOUNT register is introduced. Signed-off-by: Laxman Dewangan Signed-off-by: Kunal Agrawal [swarren, remove .dts file change, rewrote commit description, removed some duplicate/unused code and register IO] Signed-off-by: Stephen Warren Reviewed-by: Thierry Reding Tested-by: Thierry Reding Signed-off-by: Vinod Koul --- drivers/dma/tegra20-apb-dma.c | 62 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 7 deletions(-) diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 73654e33f13b..25873bc10350 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -99,6 +99,11 @@ #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) +/* Tegra148 specific registers */ +#define TEGRA_APBDMA_CHAN_WCOUNT 0x20 + +#define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24 + /* * If any burst is in flight and DMA paused then this is the time to complete * on-flight burst and update DMA status register. 
@@ -108,21 +113,22 @@ /* Channel base address offset from APBDMA base address */ #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 -/* DMA channel register space size */ -#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 - struct tegra_dma; /* * tegra_dma_chip_data Tegra chip specific DMA data * @nr_channels: Number of channels available in the controller. + * @channel_reg_size: Channel register size/stride. * @max_dma_count: Maximum DMA transfer count supported by DMA controller. * @support_channel_pause: Support channel wise pause of dma. + * @support_separate_wcount_reg: Support separate word count register. */ struct tegra_dma_chip_data { int nr_channels; + int channel_reg_size; int max_dma_count; bool support_channel_pause; + bool support_separate_wcount_reg; }; /* DMA channel registers */ @@ -132,6 +138,7 @@ struct tegra_dma_channel_regs { unsigned long apb_ptr; unsigned long ahb_seq; unsigned long apb_seq; + unsigned long wcount; }; /* @@ -421,6 +428,8 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc, tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); + if (tdc->tdma->chip_data->support_separate_wcount_reg) + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount); /* Start DMA */ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, @@ -460,6 +469,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, /* Safe to program new configuration */ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); + if (tdc->tdma->chip_data->support_separate_wcount_reg) + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, + nsg_req->ch_regs.wcount); tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); nsg_req->configured = true; @@ -713,6 +725,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) struct tegra_dma_desc *dma_desc; unsigned long flags; unsigned long status; + unsigned long wcount; bool was_busy; spin_lock_irqsave(&tdc->lock, flags); @@ -733,6 +746,10 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) tdc->isr_handler(tdc, true); status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); } + if (tdc->tdma->chip_data->support_separate_wcount_reg) + wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER); + else + wcount = status; was_busy = tdc->busy; tegra_dma_stop(tdc); @@ -741,7 +758,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); sgreq->dma_desc->bytes_transferred += - get_current_xferred_count(tdc, sgreq, status); + get_current_xferred_count(tdc, sgreq, wcount); } tegra_dma_resume(tdc); @@ -903,6 +920,17 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, return -EINVAL; } +static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc, + struct tegra_dma_channel_regs *ch_regs, u32 len) +{ + u32 len_field = (len - 4) & 0xFFFC; + + if (tdc->tdma->chip_data->support_separate_wcount_reg) + ch_regs->wcount = len_field; + else + ch_regs->csr |= len_field; +} + static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, @@ -986,7 +1014,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( sg_req->ch_regs.apb_ptr = apb_ptr; sg_req->ch_regs.ahb_ptr = mem; - sg_req->ch_regs.csr = csr | ((len - 4) & 
0xFFFC); + sg_req->ch_regs.csr = csr; + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); sg_req->ch_regs.apb_seq = apb_seq; sg_req->ch_regs.ahb_seq = ahb_seq; sg_req->configured = false; @@ -1115,7 +1144,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); sg_req->ch_regs.apb_ptr = apb_ptr; sg_req->ch_regs.ahb_ptr = mem; - sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); + sg_req->ch_regs.csr = csr; + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); sg_req->ch_regs.apb_seq = apb_seq; sg_req->ch_regs.ahb_seq = ahb_seq; sg_req->configured = false; @@ -1210,27 +1240,45 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) /* Tegra20 specific DMA controller information */ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { .nr_channels = 16, + .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = false, + .support_separate_wcount_reg = false, }; /* Tegra30 specific DMA controller information */ static const struct tegra_dma_chip_data tegra30_dma_chip_data = { .nr_channels = 32, + .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = false, + .support_separate_wcount_reg = false, }; /* Tegra114 specific DMA controller information */ static const struct tegra_dma_chip_data tegra114_dma_chip_data = { .nr_channels = 32, + .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = true, + .support_separate_wcount_reg = false, +}; + +/* Tegra148 specific DMA controller information */ +static const struct tegra_dma_chip_data tegra148_dma_chip_data = { + .nr_channels = 32, + .channel_reg_size = 0x40, + .max_dma_count = 1024UL * 64, + .support_channel_pause = true, + .support_separate_wcount_reg = true, }; static const struct of_device_id tegra_dma_of_match[] = { { + .compatible = "nvidia,tegra148-apbdma", + .data = &tegra148_dma_chip_data, + }, { .compatible = "nvidia,tegra114-apbdma", .data = &tegra114_dma_chip_data, }, { @@ -1318,7 +1366,7 @@ static int tegra_dma_probe(struct platform_device *pdev) struct tegra_dma_channel *tdc = &tdma->channels[i]; tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + - i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; + i * cdata->channel_reg_size; res = platform_get_resource(pdev, IORESOURCE_IRQ, i); if (!res) { -- cgit v1.2.3 From 868d2ee252918e7640df80156df9e1299f8118f5 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 18 Dec 2013 21:39:39 +0530 Subject: dmaengine: fix kernel-doc style typos for few comments Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index fa6b4285d8d2..188108c36313 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -257,7 +257,7 @@ struct dma_chan_percpu { * @dev: class device for sysfs * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu - * @client-count: how many clients are using this channel + * @client_count: how many clients are using this channel * @table_count: number of appearances in the mem-to-mem allocation table * @private: private data for certain client-channel associations */ @@ -279,10 +279,10 @@ struct dma_chan { /** * struct dma_chan_dev - relate sysfs device node to backing channel device - * @chan - driver channel device - * @device - sysfs device - * @dev_id - parent dma_device dev_id - * @idr_ref - reference count to 
gate release of dma_device dev_id + * @chan: driver channel device + * @device: sysfs device + * @dev_id: parent dma_device dev_id + * @idr_ref: reference count to gate release of dma_device dev_id */ struct dma_chan_dev { struct dma_chan *chan; -- cgit v1.2.3 From 9e2f7d827912609a71a53d625ee27d29dfbf87e0 Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Thu, 19 Dec 2013 22:22:29 -0300 Subject: dma: omap: Set debug level to debugging messages The channel allocated/released messages are just informative and not really interesting to users. Change them to "debug" level. Signed-off-by: Ezequiel Garcia Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 2f66cf4e54fe..362e7c49f2e1 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -190,7 +190,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan) { struct omap_chan *c = to_omap_dma_chan(chan); - dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); return omap_request_dma(c->dma_sig, "DMA engine", omap_dma_callback, c, &c->dma_ch); @@ -203,7 +203,7 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan) vchan_free_chan_resources(&c->vc); omap_free_dma(c->dma_ch); - dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); + dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); } static size_t omap_dma_sg_size(struct omap_sg *sg) -- cgit v1.2.3 From f3b77727e82722fafb1940b3fc15bfdddd9aab4a Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 29 Dec 2013 23:47:23 +0100 Subject: drivers/dma: fix error return code Set the return variable to an error code as done elsewhere in the function. A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // ( if@p1 (\(ret < 0\|ret != 0\)) { ... return ret; } | ret@p1 = 0 ) ... when != ret = e1 when != &ret *if(...) { ... when != ret = e2 when forall return ret; } // Signed-off-by: Julia Lawall Signed-off-by: Vinod Koul --- drivers/dma/cppi41.c | 4 +++- drivers/dma/ppc4xx/adma.c | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index c29dacff66fa..c18aebf7d5aa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -972,8 +972,10 @@ static int cppi41_dma_probe(struct platform_device *pdev) goto err_chans; irq = irq_of_parse_and_map(dev->of_node, 0); - if (!irq) + if (!irq) { + ret = -EINVAL; goto err_irq; + } cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 8da48c6b2a38..f9e5c0a5e494 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4139,6 +4139,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) regs = ioremap(res.start, resource_size(&res)); if (!regs) { dev_err(&ofdev->dev, "failed to ioremap regs!\n"); + ret = -ENOMEM; goto err_regs_alloc; } -- cgit v1.2.3 From a85159fece07f4ff3e266da619af050928dceca1 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Mon, 30 Dec 2013 14:58:04 +0100 Subject: DMA: dmatest: extend the "device" module parameter to 32 characters With Device Tree a typical DMA controller device name can look like 10000000.dma-controller, which extends the current size of the string, allocated for this parameter. 
This patch extends its size from 20 to 32 characters. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 20f9a3aaf926..79c87fa7fbe5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -31,7 +31,7 @@ module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO | S_IWUSR); MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); -static char test_device[20]; +static char test_device[32]; module_param_string(device, test_device, sizeof(test_device), S_IRUGO | S_IWUSR); MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); @@ -89,7 +89,7 @@ MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); struct dmatest_params { unsigned int buf_size; char channel[20]; - char device[20]; + char device[32]; unsigned int threads_per_chan; unsigned int max_channels; unsigned int iterations; -- cgit v1.2.3 From af58652a3e6746c8ad498984c61c12a1941c9175 Mon Sep 17 00:00:00 2001 From: Jonas Jensen Date: Fri, 6 Dec 2013 16:42:09 +0100 Subject: dma: fix vchan_cookie_complete() debug print vd->tx.cookie is set zero on dma_cookie_complete(), save to local before printing it. Signed-off-by: Jonas Jensen Signed-off-by: Vinod Koul --- drivers/dma/virt-dma.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 85c19d63f9fb..181b95267866 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -84,10 +84,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc) static inline void vchan_cookie_complete(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + dma_cookie_t cookie; + cookie = vd->tx.cookie; dma_cookie_complete(&vd->tx); dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", - vd, vd->tx.cookie); + vd, cookie); list_add_tail(&vd->node, &vc->desc_completed); tasklet_schedule(&vc->task); -- cgit v1.2.3 From 39d1447811fef1262ba956aac719c90a0f7b257f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 2 Dec 2013 15:16:28 +0200 Subject: acpi-dma: align documentation with kernel-doc format It mostly fixes the "RETURN" sections in the resulting manual page. There is no functional change. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/acpi-dma.c | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index e69b03c0fa50..1e506afa33f5 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -30,11 +30,12 @@ static DEFINE_MUTEX(acpi_dma_lock); * @adev: ACPI device to match with * @adma: struct acpi_dma of the given DMA controller * - * Returns 1 on success, 0 when no information is available, or appropriate - * errno value on error. - * * In order to match a device from DSDT table to the corresponding CSRT device * we use MMIO address and IRQ. + * + * Return: + * 1 on success, 0 when no information is available, or appropriate errno value + * on error. */ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, struct acpi_device *adev, struct acpi_dma *adma) @@ -101,7 +102,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, * * We are using this table to get the request line range of the specific DMA * controller to be used later. 
- * */ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) { @@ -141,10 +141,11 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) * @data pointer to controller specific data to be used by * translation function * - * Returns 0 on success or appropriate errno value on error. - * * Allocated memory should be freed with appropriate acpi_dma_controller_free() * call. + * + * Return: + * 0 on success or appropriate errno value on error. */ int acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) @@ -188,6 +189,9 @@ EXPORT_SYMBOL_GPL(acpi_dma_controller_register); * @dev: struct device of DMA controller * * Memory allocated by acpi_dma_controller_register() is freed here. + * + * Return: + * 0 on success or appropriate errno value on error. */ int acpi_dma_controller_free(struct device *dev) { @@ -225,6 +229,9 @@ static void devm_acpi_dma_release(struct device *dev, void *res) * Managed acpi_dma_controller_register(). DMA controller registered by this * function are automatically freed on driver detach. See * acpi_dma_controller_register() for more information. + * + * Return: + * 0 on success or appropriate errno value on error. */ int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) @@ -267,8 +274,6 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); * @adma: struct acpi_dma of DMA controller * @dma_spec: dma specifier to update * - * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. - * * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource * Descriptor": * DMA Request Line bits is a platform-relative number uniquely @@ -276,6 +281,9 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); * mapping is done in a controller-specific OS driver. * That's why we can safely adjust slave_id when the appropriate controller is * found. + * + * Return: + * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. */ static int acpi_dma_update_dma_spec(struct acpi_dma *adma, struct acpi_dma_spec *dma_spec) @@ -334,7 +342,8 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) * @dev: struct device to get DMA request from * @index: index of FixedDMA descriptor for @dev * - * Returns pointer to appropriate dma channel on success or NULL on error. + * Return: + * Pointer to appropriate dma channel on success or NULL on error. */ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, size_t index) @@ -403,7 +412,8 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); * translate the names "tx" and "rx" here based on the most common case where * the first FixedDMA descriptor is TX and second is RX. * - * Returns pointer to appropriate dma channel on success or NULL on error. + * Return: + * Pointer to appropriate dma channel on success or NULL on error. */ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, const char *name) @@ -427,8 +437,10 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); * @adma: pointer to ACPI DMA controller data * * A simple translation function for ACPI based devices. Passes &struct - * dma_spec to the DMA controller driver provided filter function. Returns - * pointer to the channel if found or %NULL otherwise. + * dma_spec to the DMA controller driver provided filter function. + * + * Return: + * Pointer to the channel if found or %NULL otherwise. 
*/ struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, struct acpi_dma *adma) -- cgit v1.2.3 From 6d05c9fabfdc8642b42a1000cd092b2a7543d85f Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Thu, 23 Jan 2014 16:10:07 +0530 Subject: dma: pl08x: Export pl08x_filter_id Export the symbol so that it is accessible to modules. Fixes the following error: ERROR: "pl08x_filter_id" [sound/soc/samsung/snd-soc-s3c-dma.ko] undefined! Signed-off-by: Sachin Kamat Cc: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index f68f1c1d560b..8114731a1c62 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -83,6 +83,7 @@ #include #include #include +#include #include #include #include @@ -1771,6 +1772,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) return false; } +EXPORT_SYMBOL_GPL(pl08x_filter_id); /* * Just check that the device is there and active -- cgit v1.2.3 From 15cec530e4bc7bed3f51cde8404f96fd28a8c7c5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 28 Jan 2014 21:45:38 +0100 Subject: dmaengine: mmp_pdma: fix mismerge The merge between 2b7f65b11d87f "mmp_pdma: Style neatening" and 8010dad55a0ab0 "dma: add dma_get_any_slave_channel(), for use in of_xlate()" caused a build error by leaving obsolete code in place: mmp_pdma.c: In function 'mmp_pdma_dma_xlate': mmp_pdma.c:909:31: error: 'candidate' undeclared mmp_pdma.c:912:3: error: label 'retry' used but not defined mmp_pdma.c:901:24: warning: unused variable 'c' [-Wunused-variable] This removes the extraneous lines. Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index c1b7c3ac7676..b439679f4126 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -899,19 +899,11 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec, { struct mmp_pdma_device *d = ofdma->of_dma_data; struct dma_chan *chan; - struct mmp_pdma_chan *c; chan = dma_get_any_slave_channel(&d->device); if (!chan) return NULL; - /* dma_get_slave_channel will return NULL if we lost a race between - * the lookup and the reservation */ - chan = dma_get_slave_channel(candidate); - - if (!chan) - goto retry; - to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; return chan; -- cgit v1.2.3
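
For context on why the pl08x_filter_id export above matters: a slave driver built as a module typically hands pl08x_filter_id() straight to dma_request_channel() when it cannot go through the DT of_xlate path. The following is only an illustrative sketch of that pattern, not code from these patches; the channel name "tx" is an invented, platform-specific example, and real names come from the board's pl08x platform data.

/*
 * Hypothetical client-side use of the exported pl08x_filter_id().
 * The slave channel name "tx" is made up for illustration.
 */
#include <linux/dmaengine.h>
#include <linux/amba/pl08x.h>

static struct dma_chan *example_request_pl08x_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* pl08x_filter_id() matches the platform-data channel name */
	return dma_request_channel(mask, pl08x_filter_id, "tx");
}

With the symbol built into the kernel this links regardless; without the EXPORT_SYMBOL_GPL() added above, a modular client such as snd-soc-s3c-dma trips the modpost error quoted in the commit message.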