/*
 * SH SPI bus driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 *
 * Based on pxa2xx_spi.c:
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>

#define SPI_SH_TBR		0x00
#define SPI_SH_RBR		0x00
#define SPI_SH_CR1		0x08
#define SPI_SH_CR2		0x10
#define SPI_SH_CR3		0x18
#define SPI_SH_CR4		0x20
#define SPI_SH_CR5		0x28

/* CR1 */
#define SPI_SH_TBE		0x80
#define SPI_SH_TBF		0x40
#define SPI_SH_RBE		0x20
#define SPI_SH_RBF		0x10
#define SPI_SH_PFONRD		0x08
#define SPI_SH_SSDB		0x04
#define SPI_SH_SSD		0x02
#define SPI_SH_SSA		0x01

/* CR2 */
#define SPI_SH_RSTF		0x80
#define SPI_SH_LOOPBK		0x40
#define SPI_SH_CPOL		0x20
#define SPI_SH_CPHA		0x10
#define SPI_SH_L1M0		0x08

/* CR3 */
#define SPI_SH_MAX_BYTE		0xFF

/* CR4 */
#define SPI_SH_TBEI		0x80
#define SPI_SH_TBFI		0x40
#define SPI_SH_RBEI		0x20
#define SPI_SH_RBFI		0x10
#define SPI_SH_WPABRT		0x04
#define SPI_SH_SSS		0x01

/* CR8 */
#define SPI_SH_P1L0		0x80
#define SPI_SH_PP1L0		0x40
#define SPI_SH_MUXI		0x20
#define SPI_SH_MUXIRQ		0x10

#define SPI_SH_FIFO_SIZE	32
#define SPI_SH_SEND_TIMEOUT	(3 * HZ)
#define SPI_SH_RECEIVE_TIMEOUT	(HZ >> 3)

#undef DEBUG

struct spi_sh_data {
	void __iomem *addr;
	int irq;
	struct spi_master *master;
	struct list_head queue;
	struct workqueue_struct *workqueue;
	struct work_struct ws;
	unsigned long cr1;
	wait_queue_head_t wait;
	spinlock_t lock;
	int width;
};

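/*
 * Register accessors.  ss->width (8 or 32) is derived from the memory
 * resource type in spi_sh_probe(); on the 8-bit bus the register
 * offsets are shifted right by two before the access is issued.
 */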
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
			 unsigned long offset)
{
	if (ss->width == 8)
		iowrite8(data, ss->addr + (offset >> 2));
	else if (ss->width == 32)
		iowrite32(data, ss->addr + offset);
}

static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
	if (ss->width == 8)
		return ioread8(ss->addr + (offset >> 2));
	else if (ss->width == 32)
		return ioread32(ss->addr + offset);
	else
		return 0;
}

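/* Read-modify-write helpers for setting and clearing bits in a register. */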
static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
			   unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp |= val;
	spi_sh_write(ss, tmp, offset);
}

static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
			     unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp &= ~val;
	spi_sh_write(ss, tmp, offset);
}

static void clear_fifo(struct spi_sh_data *ss)
{
	spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
	spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
}

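/*
 * Polling waits (10 us steps, roughly one second in total) for the
 * receive buffer to hold data or the transmit buffer to drain.
 */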
static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

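/*
 * Push the transmit buffer to the FIFO in chunks of up to
 * SPI_SH_FIFO_SIZE bytes.  Between chunks the TBE interrupt is enabled
 * and the caller sleeps until spi_sh_irq() reports an empty transmit
 * buffer or SPI_SH_SEND_TIMEOUT expires.  A write-protect abort
 * (WPABRT) terminates the transfer with -EIO.  On the last transfer of
 * a message the chip-select bits in CR1 are updated and the final
 * drain of the FIFO is awaited as well.
 */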
static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
		       struct spi_transfer *t)
{
	int i, retval = 0;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	long ret;

	if (t->len)
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	data = (unsigned char *)t->tx_buf;
	while (remain > 0) {
		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len &&
				!(spi_sh_read(ss, SPI_SH_CR4) &
							SPI_SH_WPABRT) &&
				!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
				i++)
			spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);

		if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
			/* Abort SPI operation */
			spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
			retval = -EIO;
			break;
		}

		cur_len = i;

		remain -= cur_len;
		data += cur_len;

		if (remain > 0) {
			ss->cr1 &= ~SPI_SH_TBE;
			spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_TBE,
						 SPI_SH_SEND_TIMEOUT);
			if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}
	}

	if (list_is_last(&t->transfer_list, &mesg->transfers)) {
		spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

		ss->cr1 &= ~SPI_SH_TBE;
		spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
		ret = wait_event_interruptible_timeout(ss->wait,
					 ss->cr1 & SPI_SH_TBE,
					 SPI_SH_SEND_TIMEOUT);
		if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
			printk(KERN_ERR "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return retval;
}

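/*
 * Program the expected byte count into CR3 (capped at SPI_SH_MAX_BYTE),
 * update the chip-select control bits in CR1 and drain the receive
 * FIFO.  When a full FIFO is expected, the RBF interrupt is used to
 * sleep until data arrives; the individual bytes are then read from
 * RBR behind a polling wait.  CR3 is rewritten at the end to stop
 * reception.
 */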
static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
			  struct spi_transfer *t)
{
	int i;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	long ret;

	if (t->len > SPI_SH_MAX_BYTE)
		spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
	else
		spi_sh_write(ss, t->len, SPI_SH_CR3);

	spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
	spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	spi_sh_wait_write_buffer_empty(ss);

	data = (unsigned char *)t->rx_buf;
	while (remain > 0) {
		if (remain >= SPI_SH_FIFO_SIZE) {
			ss->cr1 &= ~SPI_SH_RBF;
			spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_RBF,
						 SPI_SH_RECEIVE_TIMEOUT);
			if (ret == 0 &&
			    spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}

		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len; i++) {
			if (spi_sh_wait_receive_buffer(ss))
				break;
			data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
		}

		remain -= cur_len;
		data += cur_len;
	}

	/* deassert CS when SPI is receiving. */
	if (t->len > SPI_SH_MAX_BYTE) {
		clear_fifo(ss);
		spi_sh_write(ss, 1, SPI_SH_CR3);
	} else {
		spi_sh_write(ss, 0, SPI_SH_CR3);
	}

	return 0;
}

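/*
 * Workqueue handler: drains the driver's message queue, runs the send
 * and receive halves of each transfer, completes the messages and
 * finally deasserts the chip select and clears the FIFOs.
 */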
static void spi_sh_work(struct work_struct *work)
{
	struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
	struct spi_message *mesg;
	struct spi_transfer *t;
	unsigned long flags;
	int ret;

	pr_debug("%s: enter\n", __func__);

	spin_lock_irqsave(&ss->lock, flags);
	while (!list_empty(&ss->queue)) {
		mesg = list_entry(ss->queue.next, struct spi_message, queue);
		list_del_init(&mesg->queue);

		spin_unlock_irqrestore(&ss->lock, flags);
		list_for_each_entry(t, &mesg->transfers, transfer_list) {
			pr_debug("tx_buf = %p, rx_buf = %p\n",
					t->tx_buf, t->rx_buf);
			pr_debug("len = %d, delay_usecs = %d\n",
					t->len, t->delay_usecs);

			if (t->tx_buf) {
				ret = spi_sh_send(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			if (t->rx_buf) {
				ret = spi_sh_receive(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			mesg->actual_length += t->len;
		}
		spin_lock_irqsave(&ss->lock, flags);

		mesg->status = 0;
		mesg->complete(mesg->context);
	}

	clear_fifo(ss);
	spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
	udelay(100);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);

	clear_fifo(ss);

	spin_unlock_irqrestore(&ss->lock, flags);

	return;

 error:
	mesg->status = ret;
	mesg->complete(mesg->context);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
	clear_fifo(ss);

}

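/*
 * Per-device setup: stop any SPI cycle in progress, reset CR1 and CR3,
 * clear the FIFOs and select the 1/8 clock divider in CR2.
 */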
static int spi_sh_setup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	pr_debug("%s: enter\n", __func__);

	spi_sh_write(ss, 0xfe, SPI_SH_CR1);	/* SPI cycle stop */
	spi_sh_write(ss, 0x00, SPI_SH_CR1);	/* CR1 init */
	spi_sh_write(ss, 0x00, SPI_SH_CR3);	/* CR3 init */

	clear_fifo(ss);

	/* 1/8 clock */
	spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
	udelay(10);

	return 0;
}

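/*
 * ->transfer() hook: append the message to the driver queue under the
 * spinlock and kick the workqueue; the transfer itself runs later in
 * spi_sh_work().
 */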
static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
	unsigned long flags;

	pr_debug("%s: enter\n", __func__);
	pr_debug("\tmode = %02x\n", spi->mode);

	spin_lock_irqsave(&ss->lock, flags);

	mesg->actual_length = 0;
	mesg->status = -EINPROGRESS;

	spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	list_add_tail(&mesg->queue, &ss->queue);
	queue_work(ss->workqueue, &ss->ws);

	spin_unlock_irqrestore(&ss->lock, flags);

	return 0;
}

static void spi_sh_cleanup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	pr_debug("%s: enter\n", __func__);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
}

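/*
 * Interrupt handler: latch the CR1 status bits into ss->cr1, disable
 * the corresponding interrupt sources in CR4 and wake up any waiter in
 * spi_sh_send() or spi_sh_receive().
 */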
static irqreturn_t spi_sh_irq(int irq, void *_ss)
{
	struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
	unsigned long cr1;

	cr1 = spi_sh_read(ss, SPI_SH_CR1);
	if (cr1 & SPI_SH_TBE)
		ss->cr1 |= SPI_SH_TBE;
	if (cr1 & SPI_SH_TBF)
		ss->cr1 |= SPI_SH_TBF;
	if (cr1 & SPI_SH_RBE)
		ss->cr1 |= SPI_SH_RBE;
	if (cr1 & SPI_SH_RBF)
		ss->cr1 |= SPI_SH_RBF;

	if (ss->cr1) {
		spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
		wake_up(&ss->wait);
	}

	return IRQ_HANDLED;
}

static int spi_sh_remove(struct platform_device *pdev)
{
	struct spi_sh_data *ss = platform_get_drvdata(pdev);

	spi_unregister_master(ss->master);
	destroy_workqueue(ss->workqueue);
	free_irq(ss->irq, ss);
	iounmap(ss->addr);

	return 0;
}

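/*
 * Probe: map the registers (the access width is taken from the memory
 * resource type), set up the message queue, workqueue and IRQ, and
 * register an SPI master with two chip selects.
 */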
static int spi_sh_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spi_master *master;
	struct spi_sh_data *ss;
	int ret, irq;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "platform_get_irq error\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "spi_alloc_master error.\n");
		return -ENOMEM;
	}

	ss = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, ss);

	switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
	case IORESOURCE_MEM_8BIT:
		ss->width = 8;
		break;
	case IORESOURCE_MEM_32BIT:
		ss->width = 32;
		break;
	default:
		dev_err(&pdev->dev, "unsupported access width\n");
		ret = -ENODEV;
		goto error1;
	}
	ss->irq = irq;
	ss->master = master;
	ss->addr = ioremap(res->start, resource_size(res));
	if (ss->addr == NULL) {
		dev_err(&pdev->dev, "ioremap error.\n");
		ret = -ENOMEM;
		goto error1;
	}
	INIT_LIST_HEAD(&ss->queue);
	spin_lock_init(&ss->lock);
	INIT_WORK(&ss->ws, spi_sh_work);
	init_waitqueue_head(&ss->wait);
	ss->workqueue = create_singlethread_workqueue(
					dev_name(master->dev.parent));
	if (ss->workqueue == NULL) {
		dev_err(&pdev->dev, "create workqueue error\n");
		ret = -EBUSY;
		goto error2;
	}

	ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error\n");
		goto error3;
	}

	master->num_chipselect = 2;
	master->bus_num = pdev->id;
	master->setup = spi_sh_setup;
	master->transfer = spi_sh_transfer;
	master->cleanup = spi_sh_cleanup;

	ret = spi_register_master(master);
	if (ret < 0) {
		printk(KERN_ERR "spi_register_master error.\n");
		goto error4;
	}

	return 0;

 error4:
	free_irq(irq, ss);
 error3:
	destroy_workqueue(ss->workqueue);
 error2:
	iounmap(ss->addr);
 error1:
	spi_master_put(master);

	return ret;
}

static struct platform_driver spi_sh_driver = {
	.probe = spi_sh_probe,
	.remove = spi_sh_remove,
	.driver = {
		.name = "sh_spi",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(spi_sh_driver);

MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");