/*
 * SH RSPI driver
 *
 * Copyright (C) 2012, 2013 Renesas Solutions Corp.
 *
 * Based on spi-sh.c:
 * Copyright (C) 2011 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>

#define RSPI_SPCR	0x00	/* Control Register */
#define RSPI_SSLP	0x01	/* Slave Select Polarity Register */
#define RSPI_SPPCR	0x02	/* Pin Control Register */
#define RSPI_SPSR	0x03	/* Status Register */
#define RSPI_SPDR	0x04	/* Data Register */
#define RSPI_SPSCR	0x08	/* Sequence Control Register */
#define RSPI_SPSSR	0x09	/* Sequence Status Register */
#define RSPI_SPBR	0x0a	/* Bit Rate Register */
#define RSPI_SPDCR	0x0b	/* Data Control Register */
#define RSPI_SPCKD	0x0c	/* Clock Delay Register */
#define RSPI_SSLND	0x0d	/* Slave Select Negation Delay Register */
#define RSPI_SPND	0x0e	/* Next-Access Delay Register */
#define RSPI_SPCR2	0x0f	/* Control Register 2 (SH only) */
#define RSPI_SPCMD0	0x10	/* Command Register 0 */
#define RSPI_SPCMD1	0x12	/* Command Register 1 */
#define RSPI_SPCMD2	0x14	/* Command Register 2 */
#define RSPI_SPCMD3	0x16	/* Command Register 3 */
#define RSPI_SPCMD4	0x18	/* Command Register 4 */
#define RSPI_SPCMD5	0x1a	/* Command Register 5 */
#define RSPI_SPCMD6	0x1c	/* Command Register 6 */
#define RSPI_SPCMD7	0x1e	/* Command Register 7 */

/* RSPI on RZ only */
#define RSPI_SPBFCR	0x20	/* Buffer Control Register */
#define RSPI_SPBFDR	0x22	/* Buffer Data Count Setting Register */

/* QSPI only */
#define QSPI_SPBFCR	0x18	/* Buffer Control Register */
#define QSPI_SPBDCR	0x1a	/* Buffer Data Count Register */
#define QSPI_SPBMUL0	0x1c	/* Transfer Data Length Multiplier Setting Register 0 */
#define QSPI_SPBMUL1	0x20	/* Transfer Data Length Multiplier Setting Register 1 */
#define QSPI_SPBMUL2	0x24	/* Transfer Data Length Multiplier Setting Register 2 */
#define QSPI_SPBMUL3	0x28	/* Transfer Data Length Multiplier Setting Register 3 */

/* SPCR - Control Register */
#define SPCR_SPRIE	0x80	/* Receive Interrupt Enable */
#define SPCR_SPE	0x40	/* Function Enable */
#define SPCR_SPTIE	0x20	/* Transmit Interrupt Enable */
#define SPCR_SPEIE	0x10	/* Error Interrupt Enable */
#define SPCR_MSTR	0x08	/* Master/Slave Mode Select */
#define SPCR_MODFEN	0x04	/* Mode Fault Error Detection Enable */
/* RSPI on SH only */
#define SPCR_TXMD	0x02	/* TX Only Mode (vs. Full Duplex) */
#define SPCR_SPMS	0x01	/* 3-wire Mode (vs. 4-wire) */
/* QSPI on R-Car M2 only */
#define SPCR_WSWAP	0x02	/* Word Swap of read-data for DMAC */
#define SPCR_BSWAP	0x01	/* Byte Swap of read-data for DMAC */

/* SSLP - Slave Select Polarity Register */
#define SSLP_SSL1P	0x02	/* SSL1 Signal Polarity Setting */
#define SSLP_SSL0P	0x01	/* SSL0 Signal Polarity Setting */

/* SPPCR - Pin Control Register */
#define SPPCR_MOIFE	0x20	/* MOSI Idle Value Fixing Enable */
#define SPPCR_MOIFV	0x10	/* MOSI Idle Fixed Value */
#define SPPCR_SPOM	0x04
#define SPPCR_SPLP2	0x02	/* Loopback Mode 2 (non-inverting) */
#define SPPCR_SPLP	0x01	/* Loopback Mode (inverting) */

#define SPPCR_IO3FV	0x04	/* Single-/Dual-SPI Mode IO3 Output Fixed Value */
#define SPPCR_IO2FV	0x04	/* Single-/Dual-SPI Mode IO2 Output Fixed Value */

/* SPSR - Status Register */
#define SPSR_SPRF	0x80	/* Receive Buffer Full Flag */
#define SPSR_TEND	0x40	/* Transmit End */
#define SPSR_SPTEF	0x20	/* Transmit Buffer Empty Flag */
#define SPSR_PERF	0x08	/* Parity Error Flag */
#define SPSR_MODF	0x04	/* Mode Fault Error Flag */
#define SPSR_IDLNF	0x02	/* RSPI Idle Flag */
#define SPSR_OVRF	0x01	/* Overrun Error Flag (RSPI only) */

/* SPSCR - Sequence Control Register */
#define SPSCR_SPSLN_MASK	0x07	/* Sequence Length Specification */

/* SPSSR - Sequence Status Register */
#define SPSSR_SPECM_MASK	0x70	/* Command Error Mask */
#define SPSSR_SPCP_MASK		0x07	/* Command Pointer Mask */

/* SPDCR - Data Control Register */
#define SPDCR_TXDMY	0x80	/* Dummy Data Transmission Enable */
#define SPDCR_SPLW1	0x40	/* Access Width Specification (RZ) */
#define SPDCR_SPLW0	0x20	/* Access Width Specification (RZ) */
#define SPDCR_SPLLWORD	(SPDCR_SPLW1 | SPDCR_SPLW0)
#define SPDCR_SPLWORD	SPDCR_SPLW1
#define SPDCR_SPLBYTE	SPDCR_SPLW0
#define SPDCR_SPLW	0x20	/* Access Width Specification (SH) */
#define SPDCR_SPRDTD	0x10	/* Receive Transmit Data Select (SH) */
#define SPDCR_SLSEL1	0x08
#define SPDCR_SLSEL0	0x04
#define SPDCR_SLSEL_MASK	0x0c	/* SSL1 Output Select (SH) */
#define SPDCR_SPFC1	0x02
#define SPDCR_SPFC0	0x01
#define SPDCR_SPFC_MASK	0x03	/* Frame Count Setting (1-4) (SH) */

/* SPCKD - Clock Delay Register */
#define SPCKD_SCKDL_MASK	0x07	/* Clock Delay Setting (1-8) */

/* SSLND - Slave Select Negation Delay Register */
#define SSLND_SLNDL_MASK	0x07	/* SSL Negation Delay Setting (1-8) */

/* SPND - Next-Access Delay Register */
#define SPND_SPNDL_MASK		0x07	/* Next-Access Delay Setting (1-8) */

/* SPCR2 - Control Register 2 */
#define SPCR2_PTE	0x08	/* Parity Self-Test Enable */
#define SPCR2_SPIE	0x04	/* Idle Interrupt Enable */
#define SPCR2_SPOE	0x02	/* Odd Parity Enable (vs. Even) */
#define SPCR2_SPPE	0x01	/* Parity Enable */

/* SPCMDn - Command Registers */
#define SPCMD_SCKDEN	0x8000	/* Clock Delay Setting Enable */
#define SPCMD_SLNDEN	0x4000	/* SSL Negation Delay Setting Enable */
#define SPCMD_SPNDEN	0x2000	/* Next-Access Delay Enable */
#define SPCMD_LSBF	0x1000	/* LSB First */
#define SPCMD_SPB_MASK	0x0f00	/* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit)	(((bit - 1) << 8) & SPCMD_SPB_MASK)
#define SPCMD_SPB_8BIT	0x0000	/* QSPI only */
#define SPCMD_SPB_16BIT	0x0100
#define SPCMD_SPB_20BIT	0x0000
#define SPCMD_SPB_24BIT	0x0100
#define SPCMD_SPB_32BIT	0x0200
#define SPCMD_SSLKP	0x0080	/* SSL Signal Level Keeping */
#define SPCMD_SPIMOD_MASK	0x0060	/* SPI Operating Mode (QSPI only) */
#define SPCMD_SPIMOD1	0x0040
#define SPCMD_SPIMOD0	0x0020
#define SPCMD_SPIMOD_SINGLE	0
#define SPCMD_SPIMOD_DUAL	SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD	SPCMD_SPIMOD1
#define SPCMD_SPRW	0x0010	/* SPI Read/Write Access (Dual/Quad) */
#define SPCMD_SSLA_MASK	0x0030	/* SSL Assert Signal Setting (RSPI) */
#define SPCMD_BRDV_MASK	0x000c	/* Bit Rate Division Setting */
#define SPCMD_CPOL	0x0002	/* Clock Polarity Setting */
#define SPCMD_CPHA	0x0001	/* Clock Phase Setting */

/* SPBFCR - Buffer Control Register */
#define SPBFCR_TXRST	0x80	/* Transmit Buffer Data Reset */
#define SPBFCR_RXRST	0x40	/* Receive Buffer Data Reset */
#define SPBFCR_TXTRG_MASK	0x30	/* Transmit Buffer Data Triggering Number */
#define SPBFCR_RXTRG_MASK	0x07	/* Receive Buffer Data Triggering Number */

#define DUMMY_DATA	0x00

struct rspi_data {
	void __iomem *addr;
	u32 max_speed_hz;
	struct spi_master *master;
	wait_queue_head_t wait;
	struct clk *clk;
	u16 spcmd;
	u8 spsr;
	u8 sppcr;
	int rx_irq, tx_irq;
	const struct spi_ops *ops;

	/* for dmaengine */
	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;

	unsigned dma_width_16bit:1;
	unsigned dma_callbacked:1;
	unsigned byte_access:1;
};

static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
	iowrite8(data, rspi->addr + offset);
}

static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
	iowrite16(data, rspi->addr + offset);
}

static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
	iowrite32(data, rspi->addr + offset);
}

static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
	return ioread8(rspi->addr + offset);
}

static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
	return ioread16(rspi->addr + offset);
}

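/*
 * Data Register (SPDR) accessors: byte-wide when the controller variant was
 * configured for byte access (RZ, QSPI), 16-bit words on legacy SH.
 */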
static void rspi_write_data(const struct rspi_data *rspi, u16 data)
{
	if (rspi->byte_access)
		rspi_write8(rspi, data, RSPI_SPDR);
	else /* 16 bit */
		rspi_write16(rspi, data, RSPI_SPDR);
}

static u16 rspi_read_data(const struct rspi_data *rspi)
{
	if (rspi->byte_access)
		return rspi_read8(rspi, RSPI_SPDR);
	else /* 16 bit */
		return rspi_read16(rspi, RSPI_SPDR);
}

/* optional functions */
struct spi_ops {
	int (*set_config_register)(struct rspi_data *rspi, int access_size);
	int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer);
};

/*
 * functions for RSPI on legacy SH
 */
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Disable dummy transmission, set 16-bit word access, 1 frame */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 0;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets parity, interrupt mask */
	rspi_write8(rspi, 0x00, RSPI_SPCR2);

	/* Sets SPCMD */
	rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | rspi->spcmd,
		     RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}

/*
 * functions for RSPI on RZ
 */
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets SPCMD */
	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}

/*
 * functions for QSPI
 */
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	u16 spcmd;
	int spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Data Length Setting */
	if (access_size == 8)
		spcmd = SPCMD_SPB_8BIT;
	else if (access_size == 16)
		spcmd = SPCMD_SPB_16BIT;
	else
		spcmd = SPCMD_SPB_32BIT;

	spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | rspi->spcmd | SPCMD_SPNDEN;

	/* Resets transfer data length */
	rspi_write32(rspi, 0, QSPI_SPBMUL0);

	/* Resets transmit and receive buffer */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	/* Sets buffer to allow normal operation */
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);

	/* Sets SPCMD */
	rspi_write16(rspi, spcmd, RSPI_SPCMD0);

	/* Enables the SPI function in master mode */
	rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);

	return 0;
}

#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)

static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}

static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}

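/*
 * Enable the requested interrupt and wait up to one second for the
 * corresponding status flag to show up in SPSR (cached by the IRQ handler).
 */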
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
				   u8 enable_bit)
{
	int ret;

	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
	rspi_enable_irq(rspi, enable_bit);
	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
	if (ret == 0 && !(rspi->spsr & wait_mask))
		return -ETIMEDOUT;

	return 0;
}

static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
	if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
		dev_err(&rspi->master->dev, "transmit timeout\n");
		return -ETIMEDOUT;
	}
	rspi_write_data(rspi, data);
	return 0;
}

static int rspi_data_in(struct rspi_data *rspi)
{
	u8 data;

	if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
		dev_err(&rspi->master->dev, "receive timeout\n");
		return -ETIMEDOUT;
	}
	data = rspi_read_data(rspi);
	return data;
}

static int rspi_data_out_in(struct rspi_data *rspi, u8 data)
{
	int ret;

	ret = rspi_data_out(rspi, data);
	if (ret < 0)
		return ret;

	return rspi_data_in(rspi);
}

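/* dmaengine completion callback: flag completion and wake up the waiter */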
static void rspi_dma_complete(void *arg)
{
	struct rspi_data *rspi = arg;

	rspi->dma_callbacked = 1;
	wake_up_interruptible(&rspi->wait);
}

static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf,
			   unsigned len, struct dma_chan *chan,
			   enum dma_transfer_direction dir)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, len);
	sg_dma_len(sg) = len;
	return dma_map_sg(chan->device->dev, sg, 1, dir);
}

static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
			      enum dma_transfer_direction dir)
{
	dma_unmap_sg(chan->device->dev, sg, 1, dir);
}

static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
{
	u16 *dst = buf;
	const u8 *src = data;

	while (len) {
		*dst++ = (u16)(*src++);
		len--;
	}
}

static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
{
	u8 *dst = buf;
	const u16 *src = data;

	while (len) {
		*dst++ = (u8)*src++;
		len--;
	}
}

static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg;
	const void *buf = NULL;
	struct dma_async_tx_descriptor *desc;
	unsigned int len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		void *tmp;
		/*
		 * If the DMAC bus width is 16-bit, allocate a temporary
		 * buffer and expand the original data into the format the
		 * DMAC expects:
		 *  original data: 1st byte, 2nd byte ...
		 *  DMAC data:     1st byte, dummy, 2nd byte, dummy ...
		 */
		len = t->len * 2;
		tmp = kmalloc(len, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
		buf = tmp;
	} else {
		len = t->len;
		buf = t->tx_buf;
	}

	if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	/*
	 * The DMAC needs SPTIE, but if SPTIE is set the SPTI interrupt
	 * handler would run as well.  Disable the IRQ line for the duration
	 * of the DMA transfer instead.
	 */
	disable_irq(rspi->tx_irq);

	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE);
	rspi->dma_callbacked = 0;

	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE);

	enable_irq(rspi->tx_irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	if (rspi->dma_width_16bit)
		kfree(buf);

	return ret;
}

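/*
 * Flush a stale byte from the receive buffer and clear the overrun flag
 * before starting a new reception.
 */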
static void rspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);	/* dummy read */
	if (spsr & SPSR_OVRF)
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
			    RSPI_SPSR);
}

static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
	rspi_receive_init(rspi);
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
	rspi_write8(rspi, 0, RSPI_SPBFCR);
}

static void qspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);	/* dummy read */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0, QSPI_SPBFCR);
}

static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg, sg_dummy;
	void *dummy = NULL, *rx_buf = NULL;
	struct dma_async_tx_descriptor *desc, *desc_dummy;
	unsigned int len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		/*
		 * If the DMAC bus width is 16-bit, receive into a temporary
		 * buffer and compact the DMAC data back into the actual
		 * receive buffer afterwards:
		 *  DMAC data:   1st byte, dummy, 2nd byte, dummy ...
		 *  actual data: 1st byte, 2nd byte ...
		 */
		len = t->len * 2;
		rx_buf = kmalloc(len, GFP_KERNEL);
		if (!rx_buf)
			return -ENOMEM;
	} else {
		len = t->len;
		rx_buf = t->rx_buf;
	}

	/* prepare dummy transfer to generate SPI clocks */
	dummy = kzalloc(len, GFP_KERNEL);
	if (!dummy) {
		ret = -ENOMEM;
		goto end_nomap;
	}
	if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
			     DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_dummy) {
		ret = -EIO;
		goto end_dummy_mapped;
	}

	/* prepare receive transfer */
	if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
			     DMA_FROM_DEVICE)) {
		ret = -EFAULT;
		goto end_dummy_mapped;
	}
	desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	rspi_receive_init(rspi);

	/*
	 * The DMAC needs SPTIE, but if SPTIE is set the SPTI interrupt
	 * handler would run as well.  Disable the IRQ lines for the duration
	 * of the DMA transfer instead.
	 */
	disable_irq(rspi->tx_irq);
	if (rspi->rx_irq != rspi->tx_irq)
		disable_irq(rspi->rx_irq);

	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
	rspi->dma_callbacked = 0;

	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_rx);

	desc_dummy->callback = NULL;	/* No callback */
	dmaengine_submit(desc_dummy);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);

	enable_irq(rspi->tx_irq);
	if (rspi->rx_irq != rspi->tx_irq)
		enable_irq(rspi->rx_irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
end_dummy_mapped:
	rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	if (rspi->dma_width_16bit) {
		if (!ret)
			rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
		kfree(rx_buf);
	}
	kfree(dummy);

	return ret;
}

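/* A transfer may use DMA only if the needed channels were set up at probe time */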
static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
{
	if (t->tx_buf && rspi->chan_tx)
		return 1;
	/* If the module receives data by DMAC, it also needs TX DMAC */
	if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
		return 1;

	return 0;
}

static int rspi_transfer_out_in(struct rspi_data *rspi,
				struct spi_transfer *xfer)
{
	int remain = xfer->len, ret;
	const u8 *tx_buf = xfer->tx_buf;
	u8 *rx_buf = xfer->rx_buf;
	u8 spcr, data;

	rspi_receive_init(rspi);

	spcr = rspi_read8(rspi, RSPI_SPCR);
	if (rx_buf)
		spcr &= ~SPCR_TXMD;
	else
		spcr |= SPCR_TXMD;
	rspi_write8(rspi, spcr, RSPI_SPCR);

	while (remain > 0) {
		data = tx_buf ? *tx_buf++ : DUMMY_DATA;
		ret = rspi_data_out(rspi, data);
		if (ret < 0)
			return ret;
		if (rx_buf) {
			ret = rspi_data_in(rspi);
			if (ret < 0)
				return ret;
			*rx_buf++ = ret;
		}
		remain--;
	}

	/* Wait for the last transmission */
	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

	return 0;
}

static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_master_get_devdata(master);
	int ret;

	if (!rspi_is_dma(rspi, xfer))
		return rspi_transfer_out_in(rspi, xfer);

	if (xfer->tx_buf) {
		ret = rspi_send_dma(rspi, xfer);
		if (ret < 0)
			return ret;
	}
	if (xfer->rx_buf)
		return rspi_receive_dma(rspi, xfer);

	return 0;
}

static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
				   struct spi_transfer *xfer)
{
	int remain = xfer->len, ret;
	const u8 *tx_buf = xfer->tx_buf;
	u8 *rx_buf = xfer->rx_buf;
	u8 data;

	rspi_rz_receive_init(rspi);

	while (remain > 0) {
		data = tx_buf ? *tx_buf++ : DUMMY_DATA;
		ret = rspi_data_out_in(rspi, data);
		if (ret < 0)
			return ret;
		if (rx_buf)
			*rx_buf++ = ret;
		remain--;
	}

	/* Wait for the last transmission */
	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

	return 0;
}

static int rspi_rz_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_master_get_devdata(master);

	return rspi_rz_transfer_out_in(rspi, xfer);
}

static int qspi_transfer_out_in(struct rspi_data *rspi,
				struct spi_transfer *xfer)
{
	int remain = xfer->len, ret;
	const u8 *tx_buf = xfer->tx_buf;
	u8 *rx_buf = xfer->rx_buf;
	u8 data;

	qspi_receive_init(rspi);

	while (remain > 0) {
		data = tx_buf ? *tx_buf++ : DUMMY_DATA;
		ret = rspi_data_out_in(rspi, data);
		if (ret < 0)
			return ret;
		if (rx_buf)
			*rx_buf++ = ret;
		remain--;
	}

	/* Wait for the last transmission */
	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

	return 0;
}

static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_master_get_devdata(master);

	return qspi_transfer_out_in(rspi, xfer);
}

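/*
 * spi core setup() callback: cache the per-device speed, SPCMD and SPPCR
 * settings, then program the controller for 8-bit access.
 */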
static int rspi_setup(struct spi_device *spi)
{
	struct rspi_data *rspi = spi_master_get_devdata(spi->master);

	rspi->max_speed_hz = spi->max_speed_hz;

	rspi->spcmd = SPCMD_SSLKP;
	if (spi->mode & SPI_CPOL)
		rspi->spcmd |= SPCMD_CPOL;
	if (spi->mode & SPI_CPHA)
		rspi->spcmd |= SPCMD_CPHA;

	/* CMOS output mode and MOSI signal from previous transfer */
	rspi->sppcr = 0;
	if (spi->mode & SPI_LOOP)
		rspi->sppcr |= SPPCR_SPLP;

	set_config_register(rspi, 8);

	return 0;
}

static void rspi_cleanup(struct spi_device *spi)
{
}

static int rspi_prepare_message(struct spi_master *master,
				struct spi_message *message)
{
	struct rspi_data *rspi = spi_master_get_devdata(master);

	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
	return 0;
}

static int rspi_unprepare_message(struct spi_master *master,
				  struct spi_message *message)
{
	struct rspi_data *rspi = spi_master_get_devdata(master);

	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
	return 0;
}

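/*
 * Interrupt handlers: latch SPSR for the waiting context, mask the source
 * that fired, and wake up the transfer in progress.
 */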
static irqreturn_t rspi_irq_mux(int irq, void *_sr)
{
	struct rspi_data *rspi = _sr;
	u8 spsr;
	irqreturn_t ret = IRQ_NONE;
	u8 disable_irq = 0;

	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		disable_irq |= SPCR_SPRIE;
	if (spsr & SPSR_SPTEF)
		disable_irq |= SPCR_SPTIE;

	if (disable_irq) {
		ret = IRQ_HANDLED;
		rspi_disable_irq(rspi, disable_irq);
		wake_up(&rspi->wait);
	}

	return ret;
}

static irqreturn_t rspi_irq_rx(int irq, void *_sr)
{
	struct rspi_data *rspi = _sr;
	u8 spsr;

	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF) {
		rspi_disable_irq(rspi, SPCR_SPRIE);
		wake_up(&rspi->wait);
		return IRQ_HANDLED;
	}

	return 0;
}

static irqreturn_t rspi_irq_tx(int irq, void *_sr)
{
	struct rspi_data *rspi = _sr;
	u8 spsr;

	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPTEF) {
		rspi_disable_irq(rspi, SPCR_SPTIE);
		wake_up(&rspi->wait);
		return IRQ_HANDLED;
	}

	return 0;
}

static int rspi_request_dma(struct rspi_data *rspi,
			    struct platform_device *pdev)
{
	const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	int ret;

	if (!res || !rspi_pd)
		return 0;	/* The driver assumes no error. */

	rspi->dma_width_16bit = rspi_pd->dma_width_16bit;

	/* If the module receives data by DMAC, it also needs TX DMAC */
	if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
						    (void *)rspi_pd->dma_rx_id);
		if (rspi->chan_rx) {
			cfg.slave_id = rspi_pd->dma_rx_id;
			cfg.direction = DMA_DEV_TO_MEM;
			cfg.dst_addr = 0;
			cfg.src_addr = res->start + RSPI_SPDR;
			ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
			if (!ret)
				dev_info(&pdev->dev, "Use DMA when rx.\n");
			else
				return ret;
		}
	}
	if (rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
						    (void *)rspi_pd->dma_tx_id);
		if (rspi->chan_tx) {
			cfg.slave_id = rspi_pd->dma_tx_id;
			cfg.direction = DMA_MEM_TO_DEV;
			cfg.dst_addr = res->start + RSPI_SPDR;
			cfg.src_addr = 0;
			ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
			if (!ret)
				dev_info(&pdev->dev, "Use DMA when tx\n");
			else
				return ret;
		}
	}

	return 0;
}

static void rspi_release_dma(struct rspi_data *rspi)
{
	if (rspi->chan_tx)
		dma_release_channel(rspi->chan_tx);
	if (rspi->chan_rx)
		dma_release_channel(rspi->chan_rx);
}

static int rspi_remove(struct platform_device *pdev)
{
	struct rspi_data *rspi = platform_get_drvdata(pdev);

	rspi_release_dma(rspi);
	clk_disable(rspi->clk);

	return 0;
}

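/* Request a managed IRQ named "<devname>:<suffix>" */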
static int rspi_request_irq(struct device *dev, unsigned int irq,
			    irq_handler_t handler, const char *suffix,
			    void *dev_id)
{
	const char *base = dev_name(dev);
	size_t len = strlen(base) + strlen(suffix) + 2;
	char *name = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s:%s", base, suffix);
	return devm_request_irq(dev, irq, handler, 0, name, dev_id);
}

static int rspi_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spi_master *master;
	struct rspi_data *rspi;
	int ret;
	char clk_name[16];
	const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
	const struct spi_ops *ops;
	const struct platform_device_id *id_entry = pdev->id_entry;

	ops = (struct spi_ops *)id_entry->driver_data;
	/* ops parameter check */
	if (!ops->set_config_register) {
		dev_err(&pdev->dev, "there is no set_config_register\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "spi_alloc_master error.\n");
		return -ENOMEM;
	}

	rspi = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, rspi);
	rspi->ops = ops;
	rspi->master = master;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rspi->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(rspi->addr)) {
		ret = PTR_ERR(rspi->addr);
		goto error1;
	}

	snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
	rspi->clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(rspi->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(rspi->clk);
		goto error1;
	}
	clk_enable(rspi->clk);

	init_waitqueue_head(&rspi->wait);

	if (rspi_pd && rspi_pd->num_chipselect)
		master->num_chipselect = rspi_pd->num_chipselect;
	else
		master->num_chipselect = 2;	/* default */

	master->bus_num = pdev->id;
	master->setup = rspi_setup;
	master->transfer_one = ops->transfer_one;
	master->cleanup = rspi_cleanup;
	master->prepare_message = rspi_prepare_message;
	master->unprepare_message = rspi_unprepare_message;
	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP;

	ret = platform_get_irq_byname(pdev, "rx");
	if (ret < 0) {
		ret = platform_get_irq_byname(pdev, "mux");
		if (ret < 0)
			ret = platform_get_irq(pdev, 0);
		if (ret >= 0)
			rspi->rx_irq = rspi->tx_irq = ret;
	} else {
		rspi->rx_irq = ret;
		ret = platform_get_irq_byname(pdev, "tx");
		if (ret >= 0)
			rspi->tx_irq = ret;
	}
	if (ret < 0) {
		dev_err(&pdev->dev, "platform_get_irq error\n");
		goto error2;
	}

	if (rspi->rx_irq == rspi->tx_irq) {
		/* Single multiplexed interrupt */
		ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
				       "mux", rspi);
	} else {
		/* Multi-interrupt mode, only SPRI and SPTI are used */
		ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
				       "rx", rspi);
		if (!ret)
			ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
					       rspi_irq_tx, "tx", rspi);
	}
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error\n");
		goto error2;
	}

	ret = rspi_request_dma(rspi, pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "rspi_request_dma failed.\n");
		goto error3;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(&pdev->dev, "spi_register_master error.\n");
		goto error3;
	}

	dev_info(&pdev->dev, "probed\n");

	return 0;

error3:
	rspi_release_dma(rspi);
error2:
	clk_disable(rspi->clk);
error1:
	spi_master_put(master);

	return ret;
}

static struct spi_ops rspi_ops = {
	.set_config_register = rspi_set_config_register,
	.transfer_one = rspi_transfer_one,
};

static struct spi_ops rspi_rz_ops = {
	.set_config_register = rspi_rz_set_config_register,
	.transfer_one = rspi_rz_transfer_one,
};

static struct spi_ops qspi_ops = {
	.set_config_register = qspi_set_config_register,
	.transfer_one = qspi_transfer_one,
};

static struct platform_device_id spi_driver_ids[] = {
	{ "rspi", (kernel_ulong_t)&rspi_ops },
	{ "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
	{ "qspi", (kernel_ulong_t)&qspi_ops },
	{},
};

MODULE_DEVICE_TABLE(platform, spi_driver_ids);

static struct platform_driver rspi_driver = {
	.probe = rspi_probe,
	.remove = rspi_remove,
	.id_table = spi_driver_ids,
	.driver = {
		.name = "renesas_spi",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(rspi_driver);

MODULE_DESCRIPTION("Renesas RSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:rspi");