/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";
static const char *const bcm63xx_ep_name[] = {
	bcm63xx_ep0name,
	"ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 * 2) Host sends 512 bytes of data
 * 3) Host decides to reconfigure the device and sends SET_INTERFACE
 * 4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B. Generates
 * considerably fewer IRQs, but error recovery is less robust. Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
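
/*
 * Example (a sketch, assuming the driver is built as a module; both
 * parameters are read-only at runtime, per S_IRUGO):
 *
 *   modprobe bcm63xx_udc use_fullspeed=1 irq_coalesce=1
 */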

#define BCM63XX_NUM_EP		5
#define BCM63XX_NUM_IUDMA	6
#define BCM63XX_NUM_FIFO_PAIRS	3

#define IUDMA_RESET_TIMEOUT_US	10000

#define IUDMA_EP0_RXCHAN	0
#define IUDMA_EP0_TXCHAN	1

#define IUDMA_MAX_FRAGMENT	2048
#define BCM63XX_MAX_CTRL_PKT	64

#define BCMEP_CTRL		0x00
#define BCMEP_ISOC		0x01
#define BCMEP_BULK		0x02
#define BCMEP_INTR		0x03

#define BCMEP_OUT		0x00
#define BCMEP_IN		0x01

#define BCM63XX_SPD_FULL	1
#define BCM63XX_SPD_HIGH	0

#define IUDMA_DMAC_OFFSET	0x200
#define IUDMA_DMAS_OFFSET	0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
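
/*
 * Normal flow through the ep0 state machine above (a summary of the
 * per-state comments in bcm63xx_ep0_one_round()):
 *
 *   REQUEUE -> IDLE -> {IN,OUT}_DATA_PHASE_SETUP ->
 *   {IN,OUT}_DATA_PHASE_COMPLETE [-> OUT_STATUS_PHASE] -> REQUEUE
 *
 * Spoofed SET_CONFIGURATION/SET_INTERFACE requests take the
 * IDLE -> IN_FAKE_STATUS_PHASE -> IDLE path instead.
 */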

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability. Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	   idx |     n_bds |         dir     |  max_pkt_hs   |
	    |  |      |    |          |      |      |        |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};
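
/*
 * Channels are allocated in RX/TX pairs: even indices are RX (OUT) and
 * odd indices are TX (IN) (see the ch_idx & 0x01 test in
 * iudma_init_channel()), and channels i/i+1 share FIFO pair i/2 (see
 * bcm63xx_fifo_setup()). Channels 0/1 are dedicated to ep0.
 */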

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number. -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head queue;		/* ep's requests */
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
	struct dentry *debugfs_usbd;
	struct dentry *debugfs_iudma;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
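
/*
 * For example, usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, 2) reads
 * iudma_regs + IUDMA_DMAC_OFFSET (0x200) + ENETDMAC_CHANCFG_REG +
 * 2 * ENETDMA_CHAN_WIDTH, i.e. the channel config register of IUDMA
 * channel 2 in the per-channel DMAC register block.
 */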

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels. Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed. Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
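
/*
 * With the iudma_defaults table above, each direction carves up its FIFO
 * space as: slots 0-31 for the ep0 pair, 32-159 for the bulk pair, and
 * 160-191 for the interrupt pair.
 */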

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
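
/*
 * The (i >> 1) term above maps each endpoint to a DMA channel pair: e.g.
 * with the iudma_defaults table, ep2 (bulk OUT, ch_idx 2) and ep1 (bulk
 * IN, ch_idx 3) both point at channel pair 1.
 */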

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		udc->bep[idx].ep.maxpacket = max_pkt;

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
				 iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
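
/*
 * Worked example: a 3000-byte TX request still uses max_bd_bytes =
 * IUDMA_MAX_FRAGMENT even with irq_coalesce clear (coalescing only
 * affects RX), so the loop above queues two BDs -- 2048 bytes (SOP) plus
 * 952 bytes (EOP) -- and then enables the channel.
 */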

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled. If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			   GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port. If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks. It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_name[i];
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		bep->ep.maxpacket = BCM63XX_MAX_CTRL_PKT;
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct list_head *pos, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_safe(pos, n, &bep->queue) {
			struct bcm63xx_req *breq =
				list_entry(pos, struct bcm63xx_req, queue);

			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			breq->req.complete(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately. Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine. (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it. If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration. If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker. This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
 * for the next packet. Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host. This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly. When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply. Just
		 * REQUEUE->IDLE. The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission. Don't bother
		 * calling the completion, because it originated from this
		 * function anyway. Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply. Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do. Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}

/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/

/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);

	return (usbd_readl(udc, USBD_STATUS_REG) &
		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}

/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

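		/*
		 * Poll (with the lock dropped) until the ep0 worker
		 * observes ep0_req_shutdown and moves the state machine
		 * to EP0_SHUTDOWN; see the matching mb() in
		 * bcm63xx_ep0_do_idle().
		 */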
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

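	/*
	 * Bring-up order: enable clocks first, lay out the FIFOs, set up
	 * the endpoints, flush the FIFOs, then switch the PHY into
	 * device mode.
	 */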
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = NULL;
	spin_unlock_irqrestore(&udc->lock, flags);

	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 *
	 * Sleep with the lock dropped: msleep() must not be called while
	 * holding a spinlock with interrupts disabled.
	 */
	msleep(100);

	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);

	return 0;
}

static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};

/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);

	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
			 USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}

/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;

	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
		break;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		/* this should never happen */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(udc->dev,
			"received SETUP packet with invalid link speed\n");
		return 0;
	}

	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
		return 1;
	} else {
		return 0;
	}
}

/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, we clear each wedge instead: the stall is
 * released and the endpoint is removed from the wedge map.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}

/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false;

	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

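	/*
	 * Ack the pending events before handling them; presumably a new
	 * event arriving mid-handler will then re-assert the IRQ.
	 */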
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

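	/* Notify the gadget driver outside of the lock. */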
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);

	return IRQ_HANDLED;
}

/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread.  For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (completed transaction).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

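			/*
			 * Done when the request is fully satisfied, or when
			 * the hardware returned a short packet (iudma_read
			 * reported fewer bytes than were queued in the BDs).
			 */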
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

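			/* same completion rule as ep0: satisfied or short */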
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

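	/* Unmap and complete the request with the lock dropped. */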
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}

/***********************************************************************
 * Debug filesystem
 ***********************************************************************/

/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;

	if (!udc->driver)
		return -ENODEV;

	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	seq_puts(s, "regs:\n");
	seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, " events: %08x; stall: %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));

	return 0;
}

/*
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;

	if (!udc->driver)
		return -ENODEV;

	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		struct list_head *pos;

		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_puts(s, "control");
			break;
		case BCMEP_BULK:
			seq_puts(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_puts(s, "interrupt");
			break;
		}
		seq_puts(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));

		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);

		if (iudma->bep) {
			i = 0;
			list_for_each(pos, &iudma->bep->queue)
				i++;
			seq_printf(s, "; %d queued\n", i);
		} else {
			seq_puts(s, "\n");
		}

		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];

			seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   (u32)(i * sizeof(*d)), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_puts(s, " <<RD");
			if (d == iudma->write_bd)
				seq_puts(s, " <<WR");
			seq_puts(s, "\n");
		}

		seq_puts(s, "\n");
	}

	return 0;
}

static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
}

static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
}

static const struct file_operations usbd_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_usbd_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};

static const struct file_operations iudma_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_iudma_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};

/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root, *usbd, *iudma;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;

	usbd = debugfs_create_file("usbd", 0400, root, udc,
				   &usbd_dbg_fops);
	if (!usbd)
		goto err_usbd;
	iudma = debugfs_create_file("iudma", 0400, root, udc,
				    &iudma_dbg_fops);
	if (!iudma)
		goto err_iudma;

	udc->debugfs_root = root;
	udc->debugfs_usbd = usbd;
	udc->debugfs_iudma = iudma;
	return;
err_iudma:
	debugfs_remove(usbd);
err_usbd:
	debugfs_remove(root);
err_root:
	dev_err(udc->dev, "debugfs is not available\n");
}

/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_remove(udc->debugfs_iudma);
	debugfs_remove(udc->debugfs_usbd);
	debugfs_remove(udc->debugfs_root);
	udc->debugfs_iudma = NULL;
	udc->debugfs_usbd = NULL;
	udc->debugfs_root = NULL;
}

/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev->platform_data;
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc) {
		dev_err(dev, "cannot allocate memory\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

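	/* MMIO resource 0: USBD core registers; resource 1: IUDMA registers */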
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource #0\n");
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0) {
		dev_err(dev, "error requesting IRQ #%d\n", irq);
		goto out_uninit;
	}

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0) {
			dev_err(dev, "error requesting IRQ #%d\n", irq);
			goto out_uninit;
		}
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;
}

/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}

static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller")
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);