1 // SPDX-License-Identifier: GPL-2.0
3 * MUSB OTG driver core code
5 * Copyright 2005 Mentor Graphics Corporation
6 * Copyright (C) 2005-2006 by Texas Instruments
7 * Copyright (C) 2006-2007 Nokia Corporation
11 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
13 * This consists of a Host Controller Driver (HCD) and a peripheral
14 * controller driver implementing the "Gadget" API; OTG support is
15 * in the works. These are normal Linux-USB controller drivers which
16 * use IRQs and have no dedicated thread.
18 * This version of the driver has only been used with products from
19 * Texas Instruments. Those products integrate the Inventra logic
20 * with other DMA, IRQ, and bus modules, as well as other logic that
21 * needs to be reflected in this driver.
24 * NOTE: the original Mentor code here was pretty much a collection
25 * of mechanisms that don't seem to have been fully integrated/working
26 * for *any* Linux kernel version. This version aims at Linux 2.6.now,
27 * Key open issues include:
29 * - Lack of host-side transaction scheduling, for all transfer types.
30 * The hardware doesn't do it; instead, software must.
32 * This is not an issue for OTG devices that don't support external
33 * hubs, but for more "normal" USB hosts it's a user issue that the
34 * "multipoint" support doesn't scale in the expected ways. That
35 * includes DaVinci EVM in a common non-OTG mode.
37 * * Control and bulk use dedicated endpoints, and there's as
38 * yet no mechanism to either (a) reclaim the hardware when
39 * peripherals are NAKing, which gets complicated with bulk
40 * endpoints, or (b) use more than a single bulk endpoint in
43 * RESULT: one device may be perceived as blocking another one.
45 * * Interrupt and isochronous will dynamically allocate endpoint
46 * hardware, but (a) there's no record keeping for bandwidth;
47 * (b) in the common case that few endpoints are available, there
48 * is no mechanism to reuse endpoints to talk to multiple devices.
50 * RESULT: At one extreme, bandwidth can be overcommitted in
51 * some hardware configurations, no faults will be reported.
52 * At the other extreme, the bandwidth capabilities which do
53 * exist tend to be severely undercommitted. You can't yet hook
54 * up both a keyboard and a mouse to an external USB hub.
58 * This gets many kinds of configuration information:
59 * - Kconfig for everything user-configurable
60 * - platform_device for addressing, irq, and platform_data
61 * - platform_data is mostly for board-specific information
62 * (plus recentrly, SOC or family details)
64 * Most of the conditional compilation will (someday) vanish.
67 #include <linux/module.h>
68 #include <linux/kernel.h>
69 #include <linux/sched.h>
70 #include <linux/slab.h>
71 #include <linux/list.h>
72 #include <linux/kobject.h>
73 #include <linux/prefetch.h>
74 #include <linux/platform_device.h>
76 #include <linux/dma-mapping.h>
77 #include <linux/usb.h>
78 #include <linux/usb/of.h>
80 #include "musb_core.h"
81 #include "musb_trace.h"
/* T(a_wait_bcon): use the platform value, but never below the OTG minimum. */
83 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
86 #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
87 #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
89 #define MUSB_VERSION "6.0"
91 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
93 #define MUSB_DRIVER_NAME "musb-hdrc"
/* Exported driver name, shared with glue layers for platform-device matching. */
94 const char musb_driver_name[] = MUSB_DRIVER_NAME;
96 MODULE_DESCRIPTION(DRIVER_INFO);
97 MODULE_AUTHOR(DRIVER_AUTHOR);
98 MODULE_LICENSE("GPL");
99 MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
102 /*-------------------------------------------------------------------------*/
/* Return the struct musb previously stored in @dev's drvdata. */
104 static inline struct musb *dev_to_musb(struct device *dev)
106 	return dev_get_drvdata(dev);
/*
 * musb_get_mode() - map the DT/firmware dual-role mode of @dev to a MUSB mode.
 *
 * Queries usb_get_dr_mode() and translates USB_DR_MODE_* values into the
 * driver's enum musb_mode.  NOTE(review): this excerpt appears to be missing
 * lines (the switch statement and the returns for HOST/OTG/UNKNOWN are
 * incomplete here) -- verify against the full source before relying on the
 * visible cases.
 */
109 enum musb_mode musb_get_mode(struct device *dev)
111 	enum usb_dr_mode mode;
113 	mode = usb_get_dr_mode(dev);
115 	case USB_DR_MODE_HOST:
117 	case USB_DR_MODE_PERIPHERAL:
118 		return MUSB_PERIPHERAL;
119 	case USB_DR_MODE_OTG:
120 	case USB_DR_MODE_UNKNOWN:
125 EXPORT_SYMBOL_GPL(musb_get_mode);
127 /*-------------------------------------------------------------------------*/
129 #ifndef CONFIG_BLACKFIN
/*
 * musb_ulpi_read() - read one ULPI PHY register through the MUSB ULPI bridge.
 * @phy: transceiver; io_priv holds the MUSB register base, io_dev the PM device
 * @reg: ULPI register address (truncated to 8 bits)
 *
 * Takes a runtime-PM reference, clears SUSPENDM so the transceiver is awake,
 * programs the ULPI address, triggers a read request, and busy-waits for the
 * completion bit before latching the data.  NOTE(review): the timeout handling
 * inside the poll loop and the final return are not visible in this excerpt.
 */
130 static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
132 	void __iomem *addr = phy->io_priv;
138 	pm_runtime_get_sync(phy->io_dev);
140 	/* Make sure the transceiver is not in low power mode */
141 	power = musb_readb(addr, MUSB_POWER);
142 	power &= ~MUSB_POWER_SUSPENDM;
143 	musb_writeb(addr, MUSB_POWER, power);
145 	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
146 	* ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
149 	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
150 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
151 	MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
/* Poll until the controller reports the ULPI transaction complete. */
153 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
154 	& MUSB_ULPI_REG_CMPLT)) {
/* Acknowledge completion by clearing the CMPLT bit, then fetch the data. */
162 	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
163 	r &= ~MUSB_ULPI_REG_CMPLT;
164 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
166 	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
169 	pm_runtime_put(phy->io_dev);
/*
 * musb_ulpi_write() - write one ULPI PHY register through the MUSB ULPI bridge.
 * @phy: transceiver; io_priv holds the MUSB register base, io_dev the PM device
 * @val: value to write (truncated to 8 bits)
 * @reg: ULPI register address (truncated to 8 bits)
 *
 * Mirrors musb_ulpi_read(): wake the PHY (clear SUSPENDM), program address and
 * data, kick the request, then poll for completion and acknowledge it.
 * NOTE(review): the poll-loop timeout handling and final return are not
 * visible in this excerpt.
 */
174 static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
176 	void __iomem *addr = phy->io_priv;
182 	pm_runtime_get_sync(phy->io_dev);
184 	/* Make sure the transceiver is not in low power mode */
185 	power = musb_readb(addr, MUSB_POWER);
186 	power &= ~MUSB_POWER_SUSPENDM;
187 	musb_writeb(addr, MUSB_POWER, power);
189 	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
190 	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
191 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
/* Poll until the controller reports the ULPI transaction complete. */
193 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
194 	& MUSB_ULPI_REG_CMPLT)) {
/* Acknowledge completion by clearing the CMPLT bit. */
202 	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
203 	r &= ~MUSB_ULPI_REG_CMPLT;
204 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
207 	pm_runtime_put(phy->io_dev);
212 #define musb_ulpi_read NULL
213 #define musb_ulpi_write NULL
/*
 * usb_phy I/O ops wired to the ULPI accessors above (or NULL stubs when
 * CONFIG_BLACKFIN defines them away).
 */
216 static struct usb_phy_io_ops musb_ulpi_access = {
217 	.read = musb_ulpi_read,
218 	.write = musb_ulpi_write,
221 /*-------------------------------------------------------------------------*/
/* Default FIFO register offset: FIFOs start at 0x20, 4 bytes per endpoint. */
223 static u32 musb_default_fifo_offset(u8 epnum)
225 	return 0x20 + (epnum * 4);
228 /* "flat" mapping: each endpoint has its own i/o address */
/* No INDEX register in flat mode, so selecting an endpoint is a no-op. */
229 static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
/* Flat-mode register offset: banks of 0x10 bytes per endpoint from 0x100. */
233 static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
235 	return 0x100 + (0x10 * epnum) + offset;
238 /* "indexed" mapping: INDEX register controls register bank select */
239 static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
241 	musb_writeb(mbase, MUSB_INDEX, epnum);
/* Indexed mode: one shared bank at 0x10; INDEX (above) picks the endpoint. */
244 static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
246 	return 0x10 + offset;
/* Default bus-control register offset: 8 bytes per endpoint from 0x80. */
249 static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
251 	return 0x80 + (0x08 * epnum) + offset;
/*
 * Default MMIO accessors: plain __raw_* accesses plus tracepoints recording
 * the caller, address, offset and data.  Reads trace after the access,
 * writes trace before it.
 */
254 static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
256 	u8 data = __raw_readb(addr + offset);
258 	trace_musb_readb(__builtin_return_address(0), addr, offset, data);
262 static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
264 	trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
265 	__raw_writeb(data, addr + offset);
268 static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
270 	u16 data = __raw_readw(addr + offset);
272 	trace_musb_readw(__builtin_return_address(0), addr, offset, data);
276 static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
278 	trace_musb_writew(__builtin_return_address(0), addr, offset, data);
279 	__raw_writew(data, addr + offset);
282 static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
284 	u32 data = __raw_readl(addr + offset);
286 	trace_musb_readl(__builtin_return_address(0), addr, offset, data);
290 static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
292 	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
293 	__raw_writel(data, addr + offset);
297 * Load an endpoint's FIFO
299 static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
302 struct musb *musb = hw_ep->musb;
303 void __iomem *fifo = hw_ep->fifo;
305 if (unlikely(len == 0))
310 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
311 'T', hw_ep->epnum, fifo, len, src);
313 /* we can't assume unaligned reads work */
314 if (likely((0x01 & (unsigned long) src) == 0)) {
317 /* best case is 32bit-aligned source address */
318 if ((0x02 & (unsigned long) src) == 0) {
320 iowrite32_rep(fifo, src + index, len >> 2);
321 index += len & ~0x03;
324 __raw_writew(*(u16 *)&src[index], fifo);
329 iowrite16_rep(fifo, src + index, len >> 1);
330 index += len & ~0x01;
334 __raw_writeb(src[index], fifo);
337 iowrite8_rep(fifo, src, len);
342 * Unload an endpoint's FIFO
344 static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
346 struct musb *musb = hw_ep->musb;
347 void __iomem *fifo = hw_ep->fifo;
349 if (unlikely(len == 0))
352 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
353 'R', hw_ep->epnum, fifo, len, dst);
355 /* we can't assume unaligned writes work */
356 if (likely((0x01 & (unsigned long) dst) == 0)) {
359 /* best case is 32bit-aligned destination address */
360 if ((0x02 & (unsigned long) dst) == 0) {
362 ioread32_rep(fifo, dst, len >> 2);
366 *(u16 *)&dst[index] = __raw_readw(fifo);
371 ioread16_rep(fifo, dst, len >> 1);
376 dst[index] = __raw_readb(fifo);
379 ioread8_rep(fifo, dst, len);
384 * Old style IO functions
/*
 * Indirect register accessors.  Glue layers may override these function
 * pointers (e.g. with the musb_default_* implementations above) so the core
 * works across platforms with different register access requirements.
 */
386 u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
387 EXPORT_SYMBOL_GPL(musb_readb);
389 void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
390 EXPORT_SYMBOL_GPL(musb_writeb);
392 u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
393 EXPORT_SYMBOL_GPL(musb_readw);
395 void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
396 EXPORT_SYMBOL_GPL(musb_writew);
398 u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
399 EXPORT_SYMBOL_GPL(musb_readl);
401 void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
402 EXPORT_SYMBOL_GPL(musb_writel);
/* DMA controller factory hooks, also platform-overridable (PIO builds excluded). */
404 #ifndef CONFIG_MUSB_PIO_ONLY
405 struct dma_controller *
406 (*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
407 EXPORT_SYMBOL(musb_dma_controller_create);
409 void (*musb_dma_controller_destroy)(struct dma_controller *c);
410 EXPORT_SYMBOL(musb_dma_controller_destroy);
414 * New style IO functions
/* Dispatch FIFO reads/writes through the per-instance io ops. */
416 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
418 	return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
421 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
423 	return hw_ep->musb->io.write_fifo(hw_ep, len, src);
426 /*-------------------------------------------------------------------------*/
428 /* for high speed test mode; see USB 2.0 spec 7.1.20 */
429 static const u8 musb_test_packet[53] = {
430 	/* implicit SYNC then DATA0 to start */
/* JKJKJKJK x9 (NOTE(review): the label comment for this run is missing here) */
433 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* JJKKJJKK x8 */
435 	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
/* JJJJKKKK x8 */
437 	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
438 	/* JJJJJJJKKKKKKK x8 */
439 	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
/* JJJJJJJK x8 */
441 	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
442 	/* JKKKKKKK x10, JK */
443 	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
445 	/* implicit CRC16 then EOP to end */
/*
 * Push the USB 2.0 high-speed test packet into endpoint 0's FIFO and set
 * TXPKTRDY so the hardware transmits it (used for electrical test modes).
 */
448 void musb_load_testpacket(struct musb *musb)
450 	void __iomem *regs = musb->endpoints[0].regs;
452 	musb_ep_select(musb->mregs, 0);
453 	musb_write_fifo(musb->control_ep,
454 	sizeof(musb_test_packet), musb_test_packet);
455 	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
458 /*-------------------------------------------------------------------------*/
461 * Handles OTG hnp timeouts, such as b_ase0_brst
/*
 * OTG timer callback: handles HNP-related timeouts under musb->lock.
 * b_wait_acon timeout drops back to peripheral; A-side suspend/wait-bcon
 * timeouts turn VBUS off and head toward A_WAIT_VFALL.
 */
463 static void musb_otg_timer_func(struct timer_list *t)
465 	struct musb *musb = from_timer(musb, t, otg_timer);
468 	spin_lock_irqsave(&musb->lock, flags);
469 	switch (musb->xceiv->otg->state) {
470 	case OTG_STATE_B_WAIT_ACON:
472 	"HNP: b_wait_acon timeout; back to b_peripheral");
473 	musb_g_disconnect(musb);
474 	musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
477 	case OTG_STATE_A_SUSPEND:
478 	case OTG_STATE_A_WAIT_BCON:
479 	musb_dbg(musb, "HNP: %s timeout",
480 	usb_otg_state_string(musb->xceiv->otg->state));
481 	musb_platform_set_vbus(musb, 0);
482 	musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
/* Unexpected state: log and leave the state machine untouched. */
485 	musb_dbg(musb, "HNP: Unhandled mode %s",
486 	usb_otg_state_string(musb->xceiv->otg->state));
488 	spin_unlock_irqrestore(&musb->lock, flags);
492 * Stops the HNP transition. Caller must take care of locking.
/*
 * musb_hnp_stop() - abort an in-progress HNP transition.
 * Caller must hold the relevant locking (see comment at original site).
 *
 * A_PERIPHERAL drops the gadget connection; B_HOST clears host-role state,
 * returns to B_PERIPHERAL and suspends the bus via POWER.SUSPENDM.  Finally
 * the port-change-connection bit is cleared to sidestep hub_port_rebounce()
 * issues after returning to the A state.
 */
494 void musb_hnp_stop(struct musb *musb)
496 	struct usb_hcd *hcd = musb->hcd;
497 	void __iomem *mbase = musb->mregs;
500 	musb_dbg(musb, "HNP: stop from %s",
501 	usb_otg_state_string(musb->xceiv->otg->state));
503 	switch (musb->xceiv->otg->state) {
504 	case OTG_STATE_A_PERIPHERAL:
505 	musb_g_disconnect(musb);
506 	musb_dbg(musb, "HNP: back to %s",
507 	usb_otg_state_string(musb->xceiv->otg->state));
509 	case OTG_STATE_B_HOST:
510 	musb_dbg(musb, "HNP: Disabling HR");
512 	hcd->self.is_b_host = 0;
513 	musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
/* Suspend the bus so the A-device can take back the host role. */
515 	reg = musb_readb(mbase, MUSB_POWER);
516 	reg |= MUSB_POWER_SUSPENDM;
517 	musb_writeb(mbase, MUSB_POWER, reg);
518 	/* REVISIT: Start SESSION_REQUEST here? */
521 	musb_dbg(musb, "HNP: Stopping in unknown state %s",
522 	usb_otg_state_string(musb->xceiv->otg->state));
526 	* When returning to A state after HNP, avoid hub_port_rebounce(),
527 	* which cause occasional OPT A "Did not receive reset after connect"
530 	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
533 static void musb_recover_from_babble(struct musb *musb);
536 * Interrupt Service Routine to record USB "global" interrupts.
537 * Since these do not happen often and signify things of
538 * paramount importance, it seems OK to check them individually;
539 * the order of the tests is specified in the manual
541 * @param musb instance pointer
542 * @param int_usb register contents
/*
 * musb_stage0_irq() - service the "global" USB interrupts in manual order:
 * RESUME, SESSREQ, VBUSERROR, SUSPEND, CONNECT, DISCONNECT, RESET/BABBLE,
 * SOF.  Each handled source flips @handled to IRQ_HANDLED and drives the
 * OTG state machine accordingly.  NOTE(review): this excerpt is missing
 * many lines (braces, breaks, some statements); treat the visible structure
 * as indicative, not complete.
 */
547 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
550 	irqreturn_t handled = IRQ_NONE;
552 	musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
554 	/* in host mode, the peripheral may issue remote wakeup.
555 	* in peripheral mode, the host may resume the link.
556 	* spurious RESUME irqs happen too, paired with SUSPEND.
558 	if (int_usb & MUSB_INTR_RESUME) {
559 	handled = IRQ_HANDLED;
560 	musb_dbg(musb, "RESUME (%s)",
561 	usb_otg_state_string(musb->xceiv->otg->state));
/* Host mode: remote wakeup from the peripheral. */
563 	if (devctl & MUSB_DEVCTL_HM) {
564 	switch (musb->xceiv->otg->state) {
565 	case OTG_STATE_A_SUSPEND:
567 	musb->port1_status |=
568 	(USB_PORT_STAT_C_SUSPEND << 16)
569 	| MUSB_PORT_STAT_RESUME;
570 	musb->rh_timer = jiffies
571 	+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
572 	musb->xceiv->otg->state = OTG_STATE_A_HOST;
574 	musb_host_resume_root_hub(musb);
575 	schedule_delayed_work(&musb->finish_resume_work,
576 	msecs_to_jiffies(USB_RESUME_TIMEOUT));
578 	case OTG_STATE_B_WAIT_ACON:
579 	musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
584 	WARNING("bogus %s RESUME (%s)\n",
586 	usb_otg_state_string(musb->xceiv->otg->state));
/* Peripheral mode: the host resumed (or dropped) the link. */
589 	switch (musb->xceiv->otg->state) {
590 	case OTG_STATE_A_SUSPEND:
591 	/* possibly DISCONNECT is upcoming */
592 	musb->xceiv->otg->state = OTG_STATE_A_HOST;
593 	musb_host_resume_root_hub(musb);
595 	case OTG_STATE_B_WAIT_ACON:
596 	case OTG_STATE_B_PERIPHERAL:
597 	/* disconnect while suspended? we may
598 	* not get a disconnect irq...
600 	if ((devctl & MUSB_DEVCTL_VBUS)
601 	!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
603 	musb->int_usb |= MUSB_INTR_DISCONNECT;
604 	musb->int_usb &= ~MUSB_INTR_SUSPEND;
609 	case OTG_STATE_B_IDLE:
610 	musb->int_usb &= ~MUSB_INTR_SUSPEND;
613 	WARNING("bogus %s RESUME (%s)\n",
615 	usb_otg_state_string(musb->xceiv->otg->state));
620 	/* see manual for the order of the tests */
621 	if (int_usb & MUSB_INTR_SESSREQ) {
622 	void __iomem *mbase = musb->mregs;
/* Ignore SRP while still a B-device with VBUS valid. */
624 	if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
625 	&& (devctl & MUSB_DEVCTL_BDEVICE)) {
626 	musb_dbg(musb, "SessReq while on B state");
630 	musb_dbg(musb, "SESSION_REQUEST (%s)",
631 	usb_otg_state_string(musb->xceiv->otg->state));
633 	/* IRQ arrives from ID pin sense or (later, if VBUS power
634 	* is removed) SRP. responses are time critical:
635 	* - turn on VBUS (with silicon-specific mechanism)
636 	* - go through A_WAIT_VRISE
637 	* - ... to A_WAIT_BCON.
638 	* a_wait_vrise_tmout triggers VBUS_ERROR transitions
640 	musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
641 	musb->ep0_stage = MUSB_EP0_START;
642 	musb->xceiv->otg->state = OTG_STATE_A_IDLE;
644 	musb_platform_set_vbus(musb, 1);
646 	handled = IRQ_HANDLED;
649 	if (int_usb & MUSB_INTR_VBUSERROR) {
652 	/* During connection as an A-Device, we may see a short
653 	* current spikes causing voltage drop, because of cable
654 	* and peripheral capacitance combined with vbus draw.
655 	* (So: less common with truly self-powered devices, where
656 	* vbus doesn't act like a power supply.)
658 	* Such spikes are short; usually less than ~500 usec, max
659 	* of ~2 msec. That is, they're not sustained overcurrent
660 	* errors, though they're reported using VBUSERROR irqs.
662 	* Workarounds: (a) hardware: use self powered devices.
663 	* (b) software: ignore non-repeated VBUS errors.
665 	* REVISIT: do delays from lots of DEBUG_KERNEL checks
666 	* make trouble here, keeping VBUS < 4.4V ?
668 	switch (musb->xceiv->otg->state) {
669 	case OTG_STATE_A_HOST:
670 	/* recovery is dicey once we've gotten past the
671 	* initial stages of enumeration, but if VBUS
672 	* stayed ok at the other end of the link, and
673 	* another reset is due (at least for high speed,
674 	* to redo the chirp etc), it might work OK...
676 	case OTG_STATE_A_WAIT_BCON:
677 	case OTG_STATE_A_WAIT_VRISE:
/* Retry the session a bounded number of times before reporting overcurrent. */
678 	if (musb->vbuserr_retry) {
679 	void __iomem *mbase = musb->mregs;
681 	musb->vbuserr_retry--;
683 	devctl |= MUSB_DEVCTL_SESSION;
684 	musb_writeb(mbase, MUSB_DEVCTL, devctl);
686 	musb->port1_status |=
687 	USB_PORT_STAT_OVERCURRENT
688 	| (USB_PORT_STAT_C_OVERCURRENT << 16);
695 	dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
696 	"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
697 	usb_otg_state_string(musb->xceiv->otg->state),
/* Decode the DEVCTL VBUS level for the log message. */
700 	switch (devctl & MUSB_DEVCTL_VBUS) {
701 	case 0 << MUSB_DEVCTL_VBUS_SHIFT:
702 	s = "<SessEnd"; break;
703 	case 1 << MUSB_DEVCTL_VBUS_SHIFT:
704 	s = "<AValid"; break;
705 	case 2 << MUSB_DEVCTL_VBUS_SHIFT:
706 	s = "<VBusValid"; break;
707 	/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
711 	VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
714 	/* go through A_WAIT_VFALL then start a new session */
716 	musb_platform_set_vbus(musb, 0);
717 	handled = IRQ_HANDLED;
720 	if (int_usb & MUSB_INTR_SUSPEND) {
721 	musb_dbg(musb, "SUSPEND (%s) devctl %02x",
722 	usb_otg_state_string(musb->xceiv->otg->state), devctl);
723 	handled = IRQ_HANDLED;
725 	switch (musb->xceiv->otg->state) {
726 	case OTG_STATE_A_PERIPHERAL:
727 	/* We also come here if the cable is removed, since
728 	* this silicon doesn't report ID-no-longer-grounded.
730 	* We depend on T(a_wait_bcon) to shut us down, and
731 	* hope users don't do anything dicey during this
732 	* undesired detour through A_WAIT_BCON.
735 	musb_host_resume_root_hub(musb);
736 	musb_root_disconnect(musb);
737 	musb_platform_try_idle(musb, jiffies
738 	+ msecs_to_jiffies(musb->a_wait_bcon
739 	? : OTG_TIME_A_WAIT_BCON));
742 	case OTG_STATE_B_IDLE:
743 	if (!musb->is_active)
746 	case OTG_STATE_B_PERIPHERAL:
747 	musb_g_suspend(musb);
748 	musb->is_active = musb->g.b_hnp_enable;
/* HNP enabled: arm the b_ase0_brst timer while waiting for A-connect. */
749 	if (musb->is_active) {
750 	musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
751 	musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
752 	mod_timer(&musb->otg_timer, jiffies
754 	OTG_TIME_B_ASE0_BRST));
757 	case OTG_STATE_A_WAIT_BCON:
758 	if (musb->a_wait_bcon != 0)
759 	musb_platform_try_idle(musb, jiffies
760 	+ msecs_to_jiffies(musb->a_wait_bcon));
762 	case OTG_STATE_A_HOST:
763 	musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
764 	musb->is_active = musb->hcd->self.b_hnp_enable;
766 	case OTG_STATE_B_HOST:
767 	/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
768 	musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
771 	/* "should not happen" */
777 	if (int_usb & MUSB_INTR_CONNECT) {
778 	struct usb_hcd *hcd = musb->hcd;
780 	handled = IRQ_HANDLED;
783 	musb->ep0_stage = MUSB_EP0_START;
/* Re-arm endpoint and USB interrupt enables for the new connection. */
785 	musb->intrtxe = musb->epmask;
786 	musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
787 	musb->intrrxe = musb->epmask & 0xfffe;
788 	musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
789 	musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
790 	musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
791 	|USB_PORT_STAT_HIGH_SPEED
792 	|USB_PORT_STAT_ENABLE
794 	musb->port1_status |= USB_PORT_STAT_CONNECTION
795 	|(USB_PORT_STAT_C_CONNECTION << 16);
797 	/* high vs full speed is just a guess until after reset */
798 	if (devctl & MUSB_DEVCTL_LSDEV)
799 	musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
801 	/* indicate new connection to OTG machine */
802 	switch (musb->xceiv->otg->state) {
803 	case OTG_STATE_B_PERIPHERAL:
804 	if (int_usb & MUSB_INTR_SUSPEND) {
805 	musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
806 	int_usb &= ~MUSB_INTR_SUSPEND;
809 	musb_dbg(musb, "CONNECT as b_peripheral???");
811 	case OTG_STATE_B_WAIT_ACON:
812 	musb_dbg(musb, "HNP: CONNECT, now b_host");
814 	musb->xceiv->otg->state = OTG_STATE_B_HOST;
816 	musb->hcd->self.is_b_host = 1;
817 	del_timer(&musb->otg_timer);
/* Default: VBUS fully valid means we act as A-host. */
820 	if ((devctl & MUSB_DEVCTL_VBUS)
821 	== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
822 	musb->xceiv->otg->state = OTG_STATE_A_HOST;
824 	hcd->self.is_b_host = 0;
829 	musb_host_poke_root_hub(musb);
831 	musb_dbg(musb, "CONNECT (%s) devctl %02x",
832 	usb_otg_state_string(musb->xceiv->otg->state), devctl);
835 	if (int_usb & MUSB_INTR_DISCONNECT) {
836 	musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
837 	usb_otg_state_string(musb->xceiv->otg->state),
838 	MUSB_MODE(musb), devctl);
839 	handled = IRQ_HANDLED;
841 	switch (musb->xceiv->otg->state) {
842 	case OTG_STATE_A_HOST:
843 	case OTG_STATE_A_SUSPEND:
844 	musb_host_resume_root_hub(musb);
845 	musb_root_disconnect(musb);
846 	if (musb->a_wait_bcon != 0)
847 	musb_platform_try_idle(musb, jiffies
848 	+ msecs_to_jiffies(musb->a_wait_bcon));
850 	case OTG_STATE_B_HOST:
851 	/* REVISIT this behaves for "real disconnect"
852 	* cases; make sure the other transitions from
853 	* from B_HOST act right too. The B_HOST code
854 	* in hnp_stop() is currently not used...
856 	musb_root_disconnect(musb);
858 	musb->hcd->self.is_b_host = 0;
859 	musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
861 	musb_g_disconnect(musb);
863 	case OTG_STATE_A_PERIPHERAL:
865 	musb_root_disconnect(musb);
867 	case OTG_STATE_B_WAIT_ACON:
869 	case OTG_STATE_B_PERIPHERAL:
870 	case OTG_STATE_B_IDLE:
871 	musb_g_disconnect(musb);
874 	WARNING("unhandled DISCONNECT transition (%s)\n",
875 	usb_otg_state_string(musb->xceiv->otg->state));
880 	/* mentor saves a bit: bus reset and babble share the same irq.
881 	* only host sees babble; only peripheral sees bus reset.
883 	if (int_usb & MUSB_INTR_RESET) {
884 	handled = IRQ_HANDLED;
885 	if (is_host_active(musb)) {
887 	* When BABBLE happens what we can depends on which
888 	* platform MUSB is running, because some platforms
889 	* implemented proprietary means for 'recovering' from
890 	* Babble conditions. One such platform is AM335x. In
891 	* most cases, however, the only thing we can do is
894 	dev_err(musb->controller, "Babble\n");
895 	musb_recover_from_babble(musb);
/* Peripheral side: this irq is a bus reset, not babble. */
897 	musb_dbg(musb, "BUS RESET as %s",
898 	usb_otg_state_string(musb->xceiv->otg->state));
899 	switch (musb->xceiv->otg->state) {
900 	case OTG_STATE_A_SUSPEND:
903 	case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
904 	/* never use invalid T(a_wait_bcon) */
905 	musb_dbg(musb, "HNP: in %s, %d msec timeout",
906 	usb_otg_state_string(musb->xceiv->otg->state),
908 	mod_timer(&musb->otg_timer, jiffies
909 	+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
911 	case OTG_STATE_A_PERIPHERAL:
912 	del_timer(&musb->otg_timer);
915 	case OTG_STATE_B_WAIT_ACON:
916 	musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
917 	usb_otg_state_string(musb->xceiv->otg->state));
918 	musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
921 	case OTG_STATE_B_IDLE:
922 	musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
924 	case OTG_STATE_B_PERIPHERAL:
928 	musb_dbg(musb, "Unhandled BUS RESET as %s",
929 	usb_otg_state_string(musb->xceiv->otg->state));
935 	/* REVISIT ... this would be for multiplexing periodic endpoints, or
936 	* supporting transfer phasing to prevent exceeding ISO bandwidth
937 	* limits of a given frame or microframe.
939 	* It's not needed for peripheral side, which dedicates endpoints;
940 	* though it _might_ use SOF irqs for other purposes.
942 	* And it's not currently needed for host side, which also dedicates
943 	* endpoints, relies on TX/RX interval registers, and isn't claimed
944 	* to support ISO transfers yet.
946 	if (int_usb & MUSB_INTR_SOF) {
947 	void __iomem *mbase = musb->mregs;
948 	struct musb_hw_ep *ep;
952 	dev_dbg(musb->controller, "START_OF_FRAME\n");
953 	handled = IRQ_HANDLED;
955 	/* start any periodic Tx transfers waiting for current frame */
956 	frame = musb_readw(mbase, MUSB_FRAME);
957 	ep = musb->endpoints;
958 	for (epnum = 1; (epnum < musb->nr_endpoints)
959 	&& (musb->epmask >= (1 << epnum));
962 	* FIXME handle framecounter wraps (12 bits)
963 	* eliminate duplicated StartUrb logic
965 	if (ep->dwWaitFrame >= frame) {
967 	pr_debug("SOF --> periodic TX%s on %d\n",
968 	ep->tx_channel ? " DMA" : "",
971 	musb_h_tx_start(musb, epnum);
973 	cppi_hostdma_start(musb, epnum);
975 	} /* end of for loop */
/* Kick deferred work (sysfs/state bookkeeping) outside irq context. */
979 	schedule_delayed_work(&musb->irq_work, 0);
984 /*-------------------------------------------------------------------------*/
/*
 * Mask all USB/endpoint interrupt enables, then read the pending-interrupt
 * registers once each to flush anything already latched.
 */
986 static void musb_disable_interrupts(struct musb *musb)
988 	void __iomem *mbase = musb->mregs;
991 	/* disable interrupts */
992 	musb_writeb(mbase, MUSB_INTRUSBE, 0);
994 	musb_writew(mbase, MUSB_INTRTXE, 0);
996 	musb_writew(mbase, MUSB_INTRRXE, 0);
998 	/* flush pending interrupts */
999 	temp = musb_readb(mbase, MUSB_INTRUSB);
1000 	temp = musb_readw(mbase, MUSB_INTRTX);
1001 	temp = musb_readw(mbase, MUSB_INTRRX);
/*
 * Enable TX interrupts for all endpoints in epmask, RX interrupts for all
 * but endpoint 0 (mask 0xfffe), and the USB-level interrupt set 0xf7.
 */
1004 static void musb_enable_interrupts(struct musb *musb)
1006 	void __iomem *regs = musb->mregs;
1008 	/* Set INT enable registers, enable interrupts */
1009 	musb->intrtxe = musb->epmask;
1010 	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
1011 	musb->intrrxe = musb->epmask & 0xfffe;
1012 	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
1013 	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
1018 * Program the HDRC to start (enable interrupts, dma, etc.).
1020 void musb_start(struct musb *musb)
1022 void __iomem *regs = musb->mregs;
1023 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
1026 musb_dbg(musb, "<== devctl %02x", devctl);
1028 musb_enable_interrupts(musb);
1029 musb_writeb(regs, MUSB_TESTMODE, 0);
1031 power = MUSB_POWER_ISOUPDATE;
1033 * treating UNKNOWN as unspecified maximum speed, in which case
1034 * we will default to high-speed.
1036 if (musb->config->maximum_speed == USB_SPEED_HIGH ||
1037 musb->config->maximum_speed == USB_SPEED_UNKNOWN)
1038 power |= MUSB_POWER_HSENAB;
1039 musb_writeb(regs, MUSB_POWER, power);
1041 musb->is_active = 0;
1042 devctl = musb_readb(regs, MUSB_DEVCTL);
1043 devctl &= ~MUSB_DEVCTL_SESSION;
1045 /* session started after:
1046 * (a) ID-grounded irq, host mode;
1047 * (b) vbus present/connect IRQ, peripheral mode;
1048 * (c) peripheral initiates, using SRP
1050 if (musb->port_mode != MUSB_PORT_MODE_HOST &&
1051 musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
1052 (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
1053 musb->is_active = 1;
1055 devctl |= MUSB_DEVCTL_SESSION;
1058 musb_platform_enable(musb);
1059 musb_writeb(regs, MUSB_DEVCTL, devctl);
1063 * Make the HDRC stop (disable interrupts, etc.);
1064 * reversible by musb_start
1065 * called on gadget driver unregister
1066 * with controller locked, irqs blocked
1067 * acts as a NOP unless some role activated the hardware
/*
 * musb_stop() - quiesce the controller: platform disable, mask/flush all
 * interrupts, clear DEVCTL, then let the platform drop to idle.  Reversible
 * by musb_start(); see the comment block above the original definition for
 * locking requirements (controller locked, irqs blocked).
 */
1069 void musb_stop(struct musb *musb)
1071 	/* stop IRQs, timers, ... */
1072 	musb_platform_disable(musb);
1073 	musb_disable_interrupts(musb);
1074 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1077 	* - mark host and/or peripheral drivers unusable/inactive
1078 	* - disable DMA (and enable it in HdrcStart)
1079 	* - make sure we can musb_start() after musb_stop(); with
1080 	* OTG mode, gadget driver module rmmod/modprobe cycles that
1083 	musb_platform_try_idle(musb, 0);
1086 /*-------------------------------------------------------------------------*/
1089 * The silicon either has hard-wired endpoint configurations, or else
1090 * "dynamic fifo" sizing. The driver has support for both, though at this
1091 * writing only the dynamic sizing is very well tested. Since we switched
1092 * away from compile-time hardware parameters, we can no longer rely on
1093 * dead code elimination to leave only the relevant one in the object file.
1095 * We don't currently use dynamic fifo setup capability to do anything
1096 * more than selecting one of a bunch of predefined configurations.
/* Selects which of the mode_N_cfg FIFO tables below is applied at probe. */
1098 static ushort fifo_mode;
1100 /* "modprobe ... fifo_mode=1" etc */
1101 module_param(fifo_mode, ushort, 0);
1102 MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
1105 * tables defining fifo_mode values. define more if you like.
1106 * for host side, make sure both halves of ep1 are set up.
1109 /* mode 0 - fits in 2KB */
1110 static struct musb_fifo_cfg mode_0_cfg[] = {
1111 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1112 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1113 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
1114 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1115 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1118 /* mode 1 - fits in 4KB */
1119 static struct musb_fifo_cfg mode_1_cfg[] = {
1120 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1121 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1122 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1123 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1124 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1127 /* mode 2 - fits in 4KB */
1128 static struct musb_fifo_cfg mode_2_cfg[] = {
1129 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1130 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1131 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1132 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1133 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
1134 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
1137 /* mode 3 - fits in 4KB */
1138 /* mode 3 - fits in 4KB */
/* Double-buffered ep1, plain TX/RX on ep2, shared RXTX on ep3-4. */
1139 static struct musb_fifo_cfg mode_3_cfg[] = {
1140 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1141 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1142 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1143 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1144 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1145 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1147 /* mode 4 - fits in 16KB */
1148 /* mode 4 - fits in 16KB */
/*
 * Full 15-endpoint layout: 512-byte TX/RX pairs on ep1-9, asymmetric
 * 256/64 pairs on ep10-12, and large shared FIFOs on ep13-15 (4K/1K/1K).
 */
1149 static struct musb_fifo_cfg mode_4_cfg[] = {
1150 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1151 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1152 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1153 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1154 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1155 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1156 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1157 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1158 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1159 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1160 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
1161 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
1162 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
1163 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
1164 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
1165 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
1166 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
1167 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
1168 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
1169 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
1170 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
1171 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
1172 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
1173 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
1174 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
1175 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1176 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1178 /* mode 5 - fits in 8KB */
/*
 * Static FIFO layout for fifo_mode == 5 (8KB FIFO RAM): eps 1-5 get
 * bulk-capable 512-byte FIFOs, eps 6-12 get tiny 32-byte FIFOs
 * (interrupt-style traffic), eps 13-15 are shared FIFOs.
 * NOTE(review): closing "};" not visible in this extract.
 */
1179 static struct musb_fifo_cfg mode_5_cfg[] = {
1180 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1181 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1182 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1183 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1184 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1185 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1186 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1187 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1188 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1189 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1190 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
1191 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
1192 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
1193 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
1194 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
1195 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
1196 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
1197 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
1198 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
1199 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
1200 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
1201 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
1202 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
1203 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
1204 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
1205 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1206 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1210 * configure a fifo; for non-shared endpoints, this may be called
1211 * once for a tx fifo and once for an rx fifo.
1213 * returns negative errno or offset for next fifo.
/*
 * NOTE(review): this extract is missing lines from the original file
 * (the "static int" return-type line, the c_size/size declarations,
 * the -EMSGSIZE error returns, the FIFO_TX/FIFO_RX/FIFO_RXTX case
 * labels and closing braces).  Comments below describe only what the
 * surviving lines show.
 */
1216 fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
1217 const struct musb_fifo_cfg *cfg, u16 offset)
1219 void __iomem *mbase = musb->mregs;
1221 u16 maxpacket = cfg->maxpacket;
/* FIFOADD registers hold the RAM offset in units of 8 bytes */
1222 u16 c_off = offset >> 3;
1225 /* expect hw_ep has already been zero-initialized */
/* round maxpacket up to a power of two, minimum 8 bytes */
1227 size = ffs(max(maxpacket, (u16) 8)) - 1;
1228 maxpacket = 1 << size;
/*
 * Bounds check against total FIFO RAM (1 << (ram_bits + 2) bytes);
 * double buffering needs twice the space and sets the DPB bit.
 */
1231 if (cfg->mode == BUF_DOUBLE) {
1232 if ((offset + (maxpacket << 1)) >
1233 (1 << (musb->config->ram_bits + 2)))
1235 c_size |= MUSB_FIFOSZ_DPB;
1237 if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
1241 /* configure the FIFO */
/* select the endpoint's register bank via the INDEX register */
1242 musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
1244 /* EP0 reserved endpoint for control, bidirectional;
1245 * EP1 reserved for bulk, two unidirectional halves.
1247 if (hw_ep->epnum == 1)
1248 musb->bulk_ep = hw_ep;
1249 /* REVISIT error check: be sure ep0 can both rx and tx ... */
1250 switch (cfg->style) {
/* presumably the FIFO_TX case -- label missing from extract */
1252 musb_write_txfifosz(mbase, c_size);
1253 musb_write_txfifoadd(mbase, c_off);
1254 hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1255 hw_ep->max_packet_sz_tx = maxpacket;
/* presumably the FIFO_RX case -- label missing from extract */
1258 musb_write_rxfifosz(mbase, c_size);
1259 musb_write_rxfifoadd(mbase, c_off);
1260 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1261 hw_ep->max_packet_sz_rx = maxpacket;
/* presumably the FIFO_RXTX case: one FIFO shared by both directions */
1264 musb_write_txfifosz(mbase, c_size);
1265 musb_write_txfifoadd(mbase, c_off);
1266 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1267 hw_ep->max_packet_sz_rx = maxpacket;
1269 musb_write_rxfifosz(mbase, c_size);
1270 musb_write_rxfifoadd(mbase, c_off);
1271 hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
1272 hw_ep->max_packet_sz_tx = maxpacket;
1274 hw_ep->is_shared_fifo = true;
1278 /* NOTE rx and tx endpoint irqs aren't managed separately,
1279 * which happens to be ok
1281 musb->epmask |= (1 << hw_ep->epnum);
/* next free RAM offset; double-buffered FIFOs consumed 2x maxpacket */
1283 return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
/* fixed EP0 config: shared 64-byte control FIFO, always set up first */
1286 static struct musb_fifo_cfg ep0_cfg = {
1287 .style = FIFO_RXTX, .maxpacket = 64,
/*
 * ep_config_from_table() - program dynamic FIFO sizes/addresses for all
 * endpoints from either the board-supplied table (config->fifo_cfg) or
 * one of the built-in mode_N_cfg tables selected by fifo_mode.
 * NOTE(review): this extract is missing the "cfg = mode_N_cfg;" case
 * bodies, local declarations, error returns and closing braces.
 */
1290 static int ep_config_from_table(struct musb *musb)
1292 const struct musb_fifo_cfg *cfg;
1295 struct musb_hw_ep *hw_ep = musb->endpoints;
/* a board-provided table overrides the built-in fifo_mode tables */
1297 if (musb->config->fifo_cfg) {
1298 cfg = musb->config->fifo_cfg;
1299 n = musb->config->fifo_cfg_size;
1303 switch (fifo_mode) {
1309 n = ARRAY_SIZE(mode_0_cfg);
1313 n = ARRAY_SIZE(mode_1_cfg);
1317 n = ARRAY_SIZE(mode_2_cfg);
1321 n = ARRAY_SIZE(mode_3_cfg);
1325 n = ARRAY_SIZE(mode_4_cfg);
1329 n = ARRAY_SIZE(mode_5_cfg);
1333 pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
/* EP0 always comes first, at RAM offset 0 */
1337 offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1338 /* assert(offset > 0) */
1340 /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
1341 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1344 for (i = 0; i < n; i++) {
1345 u8 epn = cfg->hw_ep_num;
1347 if (epn >= musb->config->num_eps) {
1348 pr_debug("%s: invalid ep %d\n",
1349 musb_driver_name, epn);
/* fifo_setup() returns the next free offset, or a negative errno */
1352 offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1354 pr_debug("%s: mem overrun, ep %d\n",
1355 musb_driver_name, epn);
/* track the highest endpoint number actually configured */
1359 musb->nr_endpoints = max(epn, musb->nr_endpoints);
1362 pr_debug("%s: %d/%d max ep, %d/%d memory\n",
1364 n + 1, musb->config->num_eps * 2 - 1,
1365 offset, (1 << (musb->config->ram_bits + 2)));
/* ep1 must have been configured; fifo_setup() records it as bulk_ep */
1367 if (!musb->bulk_ep) {
1368 pr_debug("%s: missing bulk\n", musb_driver_name);
1377 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
1378 * @param musb the controller
/*
 * Read the silicon's hard-wired per-endpoint FIFO sizes instead of
 * programming them; also pick a bulk-capable (>= 512 bytes each way)
 * endpoint.  NOTE(review): extract is missing local declarations,
 * loop-body braces and return statements.
 */
1380 static int ep_config_from_hw(struct musb *musb)
1383 struct musb_hw_ep *hw_ep;
1384 void __iomem *mbase = musb->mregs;
1387 musb_dbg(musb, "<== static silicon ep config");
1389 /* FIXME pick up ep0 maxpacket size */
1391 for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
1392 musb_ep_select(mbase, epnum);
1393 hw_ep = musb->endpoints + epnum;
1395 ret = musb_read_fifosize(musb, hw_ep, epnum);
1399 /* FIXME set up hw_ep->{rx,tx}_double_buffered */
1401 /* pick an RX/TX endpoint for bulk */
1402 if (hw_ep->max_packet_sz_tx < 512
1403 || hw_ep->max_packet_sz_rx < 512)
1406 /* REVISIT: this algorithm is lazy, we should at least
1407 * try to pick a double buffered endpoint.
1411 musb->bulk_ep = hw_ep;
1414 if (!musb->bulk_ep) {
1415 pr_debug("%s: missing bulk\n", musb_driver_name);
/* controller flavor: MHDRC = multipoint (hub-capable), HDRC = single-point */
1422 enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1424 /* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
1425 * configure endpoints, or take their config from silicon
/*
 * Reads CONFIGDATA to log/record core options, records multipoint
 * capability and RTL version, then sets up EP0 and discovers the
 * endpoint configuration (dynamic table vs. silicon), finally filling
 * in per-endpoint register/FIFO pointers.  NOTE(review): extract is
 * missing local declarations, braces and the final return.
 */
1427 static int musb_core_init(u16 musb_type, struct musb *musb)
1432 void __iomem *mbase = musb->mregs;
1436 /* log core options (read using indexed model) */
1437 reg = musb_read_configdata(mbase);
1439 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1440 if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1441 strcat(aInfo, ", dyn FIFOs");
1442 musb->dyn_fifo = true;
1444 if (reg & MUSB_CONFIGDATA_MPRXE) {
1445 strcat(aInfo, ", bulk combine");
1446 musb->bulk_combine = true;
1448 if (reg & MUSB_CONFIGDATA_MPTXE) {
1449 strcat(aInfo, ", bulk split");
1450 musb->bulk_split = true;
1452 if (reg & MUSB_CONFIGDATA_HBRXE) {
1453 strcat(aInfo, ", HB-ISO Rx");
1454 musb->hb_iso_rx = true;
1456 if (reg & MUSB_CONFIGDATA_HBTXE) {
1457 strcat(aInfo, ", HB-ISO Tx");
1458 musb->hb_iso_tx = true;
1460 if (reg & MUSB_CONFIGDATA_SOFTCONE)
1461 strcat(aInfo, ", SoftConn");
1463 pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
/* MHDRC supports external hubs; plain HDRC does not */
1465 if (MUSB_CONTROLLER_MHDRC == musb_type) {
1466 musb->is_multipoint = 1;
1469 musb->is_multipoint = 0;
1471 #ifndef CONFIG_USB_OTG_BLACKLIST_HUB
1472 pr_err("%s: kernel must blacklist external hubs\n",
1477 /* log release info */
1478 musb->hwvers = musb_read_hwvers(mbase);
1479 pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
1480 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
1481 MUSB_HWVERS_MINOR(musb->hwvers),
1482 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
1485 musb_configure_ep0(musb);
1487 /* discover endpoint configuration */
/* start at 1 (EP0); ep_config_from_table() raises this as it goes */
1488 musb->nr_endpoints = 1;
1492 status = ep_config_from_table(musb);
1494 status = ep_config_from_hw(musb);
1499 /* finish init, and print endpoint config */
1500 for (i = 0; i < musb->nr_endpoints; i++) {
1501 struct musb_hw_ep *hw_ep = musb->endpoints + i;
1503 hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
1504 #if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
/* TUSB6010: separate async/sync bus windows for FIFO access */
1505 if (musb->io.quirks & MUSB_IN_TUSB) {
1506 hw_ep->fifo_async = musb->async + 0x400 +
1507 musb->io.fifo_offset(i);
1508 hw_ep->fifo_sync = musb->sync + 0x400 +
1509 musb->io.fifo_offset(i);
1510 hw_ep->fifo_sync_va =
1511 musb->sync_va + 0x400 + musb->io.fifo_offset(i);
1514 hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
1516 hw_ep->conf = mbase + 0x400 +
1517 (((i - 1) & 0xf) << 2);
1521 hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
1522 hw_ep->rx_reinit = 1;
1523 hw_ep->tx_reinit = 1;
1525 if (hw_ep->max_packet_sz_tx) {
1526 musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1527 musb_driver_name, i,
1528 hw_ep->is_shared_fifo ? "shared" : "tx",
1529 hw_ep->tx_double_buffered
1530 ? "doublebuffer, " : "",
1531 hw_ep->max_packet_sz_tx);
1533 if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
1534 musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1535 musb_driver_name, i,
1537 hw_ep->rx_double_buffered
1538 ? "doublebuffer, " : "",
1539 hw_ep->max_packet_sz_rx);
1541 if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
1542 musb_dbg(musb, "hw_ep %d not configured", i);
1548 /*-------------------------------------------------------------------------*/
1551 * handle all the irqs defined by the HDRC core. for now we expect: other
1552 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
1553 * will be assigned, and the irq will already have been acked.
1555 * called in irq context with spinlock held, irqs blocked
/*
 * Dispatch order follows the Mentor flowchart: global/USB events first
 * (musb_stage0_irq), then EP0, then per-endpoint TX, then RX.
 * NOTE(review): extract is missing declarations (devctl), braces and
 * the final return.
 */
1557 irqreturn_t musb_interrupt(struct musb *musb)
1559 irqreturn_t retval = IRQ_NONE;
1560 unsigned long status;
1561 unsigned long epnum;
/* nothing latched for us -> IRQ_NONE (shared line) */
1564 if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
1567 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1569 trace_musb_isr(musb);
1572 * According to Mentor Graphics' documentation, flowchart on page 98,
1573 * IRQ should be handled as follows:
1576 * . Session Request IRQ
1581 * . Reset/Babble IRQ
1582 * . SOF IRQ (we're not using this one)
1587 * We will be following that flowchart in order to avoid any problems
1588 * that might arise with internal Finite State Machine.
1592 retval |= musb_stage0_irq(musb, musb->int_usb, devctl);
/* bit 0 of int_tx is the EP0 interrupt (control traffic) */
1594 if (musb->int_tx & 1) {
1595 if (is_host_active(musb))
1596 retval |= musb_h_ep0_irq(musb);
1598 retval |= musb_g_ep0_irq(musb);
1600 /* we have just handled endpoint 0 IRQ, clear it */
1601 musb->int_tx &= ~BIT(0);
1604 status = musb->int_tx;
1606 for_each_set_bit(epnum, &status, 16) {
1607 retval = IRQ_HANDLED;
1608 if (is_host_active(musb))
1609 musb_host_tx(musb, epnum);
1611 musb_g_tx(musb, epnum);
1614 status = musb->int_rx;
1616 for_each_set_bit(epnum, &status, 16) {
1617 retval = IRQ_HANDLED;
1618 if (is_host_active(musb))
1619 musb_host_rx(musb, epnum);
1621 musb_g_rx(musb, epnum);
1626 EXPORT_SYMBOL_GPL(musb_interrupt);
1628 #ifndef CONFIG_MUSB_PIO_ONLY
/* DMA enabled by default; initialize the bool with "true", not the
 * integer literal 1 (same value, idiomatic C99 <stdbool.h> usage). */
1629 static bool use_dma = true;
1631 /* "modprobe ... use_dma=0" etc */
1632 module_param(use_dma, bool, 0644);
1633 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
/*
 * DMA-done callback: route the completion to the same host/gadget
 * handlers the ISR would use, keyed on endpoint number and direction.
 * NOTE(review): extract is missing the epnum==0 / transmit branching
 * lines and braces.
 */
1635 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
1637 /* called with controller lock already held */
/* CPPI signals EP0 events through its own path, not here */
1640 if (!is_cppi_enabled(musb)) {
1642 if (is_host_active(musb))
1643 musb_h_ep0_irq(musb);
1645 musb_g_ep0_irq(musb);
1648 /* endpoints 1..15 */
1650 if (is_host_active(musb))
1651 musb_host_tx(musb, epnum);
1653 musb_g_tx(musb, epnum);
1656 if (is_host_active(musb))
1657 musb_host_rx(musb, epnum);
1659 musb_g_rx(musb, epnum);
1663 EXPORT_SYMBOL_GPL(musb_dma_completion);
/* glue-layer hook for PHY VBUS/ID events; set in musb_init_controller() */
1669 static int (*musb_phy_callback)(enum musb_vbus_id_status status);
1672 * musb_mailbox - optional phy notifier function
1673 * @status phy state change
1675 * Optionally gets called from the USB PHY. Note that the USB PHY must be
1676 * disabled at the point the phy_callback is registered or unregistered.
1678 int musb_mailbox(enum musb_vbus_id_status status)
/* no callback registered -> silently ignore (return value line missing
 * from this extract) */
1680 if (musb_phy_callback)
1681 return musb_phy_callback(status);
1685 EXPORT_SYMBOL_GPL(musb_mailbox);
1687 /*-------------------------------------------------------------------------*/
/*
 * sysfs "mode" show: report the current OTG state string under the
 * controller lock.  NOTE(review): return-type line and final return
 * are missing from this extract.
 */
1690 musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1692 struct musb *musb = dev_to_musb(dev);
1693 unsigned long flags;
1696 spin_lock_irqsave(&musb->lock, flags);
1697 ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
1698 spin_unlock_irqrestore(&musb->lock, flags);
/*
 * sysfs "mode" store: accept "host", "peripheral" or "otg" and ask the
 * platform glue to switch roles.  Returns n on success, else the
 * platform error.  NOTE(review): return-type line, status declaration
 * and the invalid-input branch are missing from this extract.
 */
1704 musb_mode_store(struct device *dev, struct device_attribute *attr,
1705 const char *buf, size_t n)
1707 struct musb *musb = dev_to_musb(dev);
1708 unsigned long flags;
1711 spin_lock_irqsave(&musb->lock, flags);
1712 if (sysfs_streq(buf, "host"))
1713 status = musb_platform_set_mode(musb, MUSB_HOST);
1714 else if (sysfs_streq(buf, "peripheral"))
1715 status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
1716 else if (sysfs_streq(buf, "otg"))
1717 status = musb_platform_set_mode(musb, MUSB_OTG);
1720 spin_unlock_irqrestore(&musb->lock, flags);
1722 return (status == 0) ? n : status;
1724 static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
/*
 * sysfs "vbus" store: set the T(a_wait_bcon) timeout in ms (0 =
 * unlimited; nonzero clamped up to OTG_TIME_A_WAIT_BCON) and kick the
 * platform idle logic.  NOTE(review): return-type line, val
 * declaration and final return are missing from this extract.
 */
1727 musb_vbus_store(struct device *dev, struct device_attribute *attr,
1728 const char *buf, size_t n)
1730 struct musb *musb = dev_to_musb(dev);
1731 unsigned long flags;
1734 if (sscanf(buf, "%lu", &val) < 1) {
1735 dev_err(dev, "Invalid VBUS timeout ms value\n");
1739 spin_lock_irqsave(&musb->lock, flags);
1740 /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
1741 musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
/* drop activity so try_idle can actually idle while waiting for bcon */
1742 if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
1743 musb->is_active = 0;
1744 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1745 spin_unlock_irqrestore(&musb->lock, flags);
/*
 * sysfs "vbus" show: report VBUS validity (platform hook, falling back
 * to the DEVCTL VBUS field == VBUS-valid) plus the current
 * a_wait_bcon timeout.  NOTE(review): return-type line, local
 * declarations (vbus, devctl) and the fallback branching lines are
 * missing from this extract.
 */
1751 musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1753 struct musb *musb = dev_to_musb(dev);
1754 unsigned long flags;
1759 spin_lock_irqsave(&musb->lock, flags);
1760 val = musb->a_wait_bcon;
1761 vbus = musb_platform_get_vbus_status(musb);
1763 /* Use default MUSB method by means of DEVCTL register */
1764 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
/* VBUS field value 3 means "above VBUS-valid threshold" */
1765 if ((devctl & MUSB_DEVCTL_VBUS)
1766 == (3 << MUSB_DEVCTL_VBUS_SHIFT))
1771 spin_unlock_irqrestore(&musb->lock, flags);
1773 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1774 vbus ? "on" : "off", val);
1776 static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
1778 /* Gadget drivers can't know that a host is connected so they might want
1779 * to start SRP, but users can. This allows userspace to trigger SRP.
/*
 * Writing "1" triggers a Session Request Protocol wakeup via
 * musb_g_wakeup().  NOTE(review): return-type line, srp declaration,
 * the "srp == 1" validation and return statements are missing from
 * this extract.
 */
1782 musb_srp_store(struct device *dev, struct device_attribute *attr,
1783 const char *buf, size_t n)
1785 struct musb *musb = dev_to_musb(dev);
1788 if (sscanf(buf, "%hu", &srp) != 1
1790 dev_err(dev, "SRP: Value must be 1\n");
1795 musb_g_wakeup(musb);
1799 static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
/* sysfs attribute group registered in musb_init_controller(); the srp
 * attribute (gadget builds) is presumably listed on a line missing from
 * this extract, as are the NULL terminator and closing braces. */
1801 static struct attribute *musb_attributes[] = {
1802 &dev_attr_mode.attr,
1803 &dev_attr_vbus.attr,
1808 static const struct attribute_group musb_attr_group = {
1809 .attrs = musb_attributes,
/* DEVCTL snapshots (with speed bits masked off by the caller) that
 * identify known transient states: 0x91 = B-device with below-valid
 * VBUS but session set; 0x19 = A-device session set right after a
 * host-mode disconnect.  Used by musb_pm_runtime_check_session(). */
1812 #define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
1813 (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
1814 MUSB_DEVCTL_SESSION)
1815 #define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
1816 MUSB_DEVCTL_SESSION)
1819 * Check the musb devctl session bit to determine if we want to
1820 * allow PM runtime for the device. In general, we want to keep things
1821 * active when the session bit is set except after host disconnect.
1823 * Only called from musb_irq_work. If this ever needs to get called
1824 * elsewhere, proper locking must be implemented for musb->session.
/*
 * NOTE(review): extract is missing local declarations (devctl, s,
 * error), several case/return/brace lines and dev_dbg format strings'
 * surrounding calls; comments describe surviving lines only.
 */
1826 static void musb_pm_runtime_check_session(struct musb *musb)
1831 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1833 /* Handle session status quirks first */
/* mask out speed bits so the switch can match the quirk patterns */
1834 s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
1836 switch (devctl & ~s) {
1837 case MUSB_QUIRK_B_INVALID_VBUS_91:
/* re-poll devctl for up to quirk_retries seconds before deciding */
1838 if (musb->quirk_retries && !musb->flush_irq_work) {
1840 "Poll devctl on invalid vbus, assume no session");
1841 schedule_delayed_work(&musb->irq_work,
1842 msecs_to_jiffies(1000));
1843 musb->quirk_retries--;
1847 case MUSB_QUIRK_A_DISCONNECT_19:
1848 if (musb->quirk_retries && !musb->flush_irq_work) {
1850 "Poll devctl on possible host mode disconnect");
1851 schedule_delayed_work(&musb->irq_work,
1852 msecs_to_jiffies(1000));
1853 musb->quirk_retries--;
/* retries exhausted: treat as disconnect, allow runtime suspend */
1858 musb_dbg(musb, "Allow PM on possible host mode disconnect");
1859 pm_runtime_mark_last_busy(musb->controller);
1860 pm_runtime_put_autosuspend(musb->controller);
1861 musb->session = false;
1867 /* No need to do anything if session has not changed */
1868 s = devctl & MUSB_DEVCTL_SESSION;
1869 if (s == musb->session)
1872 /* Block PM or allow PM? */
1874 musb_dbg(musb, "Block PM on active session: %02x", devctl);
1875 error = pm_runtime_get_sync(musb->controller);
1877 dev_err(musb->controller, "Could not enable: %i\n",
1879 musb->quirk_retries = 3;
1881 musb_dbg(musb, "Allow PM with no session: %02x", devctl);
1882 pm_runtime_mark_last_busy(musb->controller);
1883 pm_runtime_put_autosuspend(musb->controller);
1889 /* Only used to provide driver mode change events */
/*
 * Delayed work: under an RPM reference, re-check the session quirks
 * and notify userspace (sysfs "mode") when the OTG state changed.
 * NOTE(review): extract is missing the error declaration, an early
 * return and closing braces.
 */
1890 static void musb_irq_work(struct work_struct *data)
1892 struct musb *musb = container_of(data, struct musb, irq_work.work);
1895 error = pm_runtime_get_sync(musb->controller);
1897 dev_err(musb->controller, "Could not enable: %i\n", error);
1902 musb_pm_runtime_check_session(musb);
1904 if (musb->xceiv->otg->state != musb->xceiv_old_state) {
1905 musb->xceiv_old_state = musb->xceiv->otg->state;
1906 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1909 pm_runtime_mark_last_busy(musb->controller);
1910 pm_runtime_put_autosuspend(musb->controller);
/*
 * Babble recovery: quiesce interrupts, let the platform glue attempt
 * recovery, drop the session bit, tell usbcore the root port
 * disconnected, and re-program the FIFO/endpoint config (lost when the
 * session dropped).  NOTE(review): extract is missing local
 * declarations, the udelay, error-return checks, the session restart
 * and closing brace.
 */
1913 static void musb_recover_from_babble(struct musb *musb)
1918 musb_disable_interrupts(musb);
1921 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
1922 * it some slack and wait for 10us.
1926 ret = musb_platform_recover(musb);
1928 musb_enable_interrupts(musb);
1932 /* drop session bit */
1933 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1934 devctl &= ~MUSB_DEVCTL_SESSION;
1935 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
1937 /* tell usbcore about it */
1938 musb_root_disconnect(musb);
1941 * When a babble condition occurs, the musb controller
1942 * removes the session bit and the endpoint config is lost.
/* dyn_fifo presumably selects which branch runs -- condition line
 * missing from this extract */
1945 ret = ep_config_from_table(musb);
1947 ret = ep_config_from_hw(musb);
1949 /* restart session */
1954 /* --------------------------------------------------------------------------
/*
 * Allocate and minimally initialize a struct musb (devm-managed):
 * queue list heads, defaults, register bases, per-endpoint back
 * pointers, host-side allocation.  Returns the instance (error paths
 * and the epnum loop body are missing lines in this extract).
 */
1958 static struct musb *allocate_instance(struct device *dev,
1959 const struct musb_hdrc_config *config, void __iomem *mbase)
1962 struct musb_hw_ep *ep;
1966 musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
1970 INIT_LIST_HEAD(&musb->control);
1971 INIT_LIST_HEAD(&musb->in_bulk);
1972 INIT_LIST_HEAD(&musb->out_bulk);
1973 INIT_LIST_HEAD(&musb->pending_list);
1975 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1976 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
1977 musb->mregs = mbase;
1978 musb->ctrl_base = mbase;
/* -ENODEV marks "no IRQ attached yet" for musb_free() */
1979 musb->nIrq = -ENODEV;
1980 musb->config = config;
1981 BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
1982 for (epnum = 0, ep = musb->endpoints;
1983 epnum < musb->config->num_eps;
1989 musb->controller = dev;
1991 ret = musb_host_alloc(musb);
1995 dev_set_drvdata(dev, musb);
/*
 * Tear down sysfs, IRQ and host-side state; tolerant of partial
 * initialization (probe-failure cleanup as well as rmmod).
 */
2003 static void musb_free(struct musb *musb)
2005 /* this has multiple entry modes. it handles fault cleanup after
2006 * probe(), where things may be partially set up, as well as rmmod
2007 * cleanup after everything's been de-activated.
2011 sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
/* nIrq stays -ENODEV until request_irq() succeeded */
2014 if (musb->nIrq >= 0) {
2016 disable_irq_wake(musb->nIrq);
2017 free_irq(musb->nIrq, musb);
2020 musb_host_free(musb);
/* one deferred-until-resume callback queued on musb->pending_list;
 * the "void *data" member line appears missing from this extract */
2023 struct musb_pending_work {
2024 int (*callback)(struct musb *musb, void *data);
2026 struct list_head node;
2031 * Called from musb_runtime_resume(), musb_resume(), and
2032 * musb_queue_resume_work(). Callers must take musb->lock.
/*
 * Drain musb->pending_list under list_lock, invoking each queued
 * callback and freeing its node; failures are logged but do not stop
 * the drain.  NOTE(review): list_del and the final return are among
 * the lines missing from this extract.
 */
2034 static int musb_run_resume_work(struct musb *musb)
2036 struct musb_pending_work *w, *_w;
2037 unsigned long flags;
2040 spin_lock_irqsave(&musb->list_lock, flags);
2041 list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
2043 error = w->callback(musb, w->data);
2045 dev_err(musb->controller,
2046 "resume callback %p failed: %i\n",
2047 w->callback, error);
2051 devm_kfree(musb->controller, w);
2053 spin_unlock_irqrestore(&musb->list_lock, flags);
2060 * Called to run work if device is active or else queue the work to happen
2061 * on resume. Caller must take musb->lock and must hold an RPM reference.
2063 * Note that we cowardly refuse queuing work after musb PM runtime
2064 * resume is done calling musb_run_resume_work() and return -EINPROGRESS
/*
 * NOTE(review): extract is missing the "void *data" parameter line,
 * the -ENOMEM check after devm_kzalloc, "w->data = data;", error
 * initialization and the final return.
 */
2067 int musb_queue_resume_work(struct musb *musb,
2068 int (*callback)(struct musb *musb, void *data),
2071 struct musb_pending_work *w;
2072 unsigned long flags;
2075 if (WARN_ON(!callback))
/* fast path: device already resumed, run synchronously */
2078 if (pm_runtime_active(musb->controller))
2079 return callback(musb, data);
2081 w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
2085 w->callback = callback;
2087 spin_lock_irqsave(&musb->list_lock, flags);
/* only queue while still runtime-suspended; otherwise the resume
 * drain has already run and the work would never execute */
2088 if (musb->is_runtime_suspended) {
2089 list_add_tail(&w->node, &musb->pending_list);
2092 dev_err(musb->controller, "could not add resume work %p\n",
2094 devm_kfree(musb->controller, w);
2095 error = -EINPROGRESS;
2097 spin_unlock_irqrestore(&musb->list_lock, flags);
2101 EXPORT_SYMBOL_GPL(musb_queue_resume_work);
/*
 * Delayed work: end a root-port reset (musb_port_reset(false)) if the
 * port still reports USB_PORT_STAT_RESET, under the controller lock.
 */
2103 static void musb_deassert_reset(struct work_struct *work)
2106 unsigned long flags;
2108 musb = container_of(work, struct musb, deassert_reset_work.work);
2110 spin_lock_irqsave(&musb->lock, flags);
2112 if (musb->port1_status & USB_PORT_STAT_RESET)
2113 musb_port_reset(musb, false);
2115 spin_unlock_irqrestore(&musb->lock, flags);
2119 * Perform generic per-controller initialization.
2121 * @dev: the controller (already clocked, etc)
2123 * @ctrl: virtual address of controller registers,
2124 * not yet corrected for platform-specific offsets
/*
 * Main probe-path worker: allocates the musb instance, wires up
 * platform-specific register accessors and glue ops, enables runtime
 * PM, initializes PHY/DMA/core/IRQ, sets the initial role per
 * port_mode, and registers debugfs + sysfs.  NOTE(review): this
 * extract is missing many lines (local declarations, error-path goto
 * labels, status checks and closing braces); comments describe the
 * surviving lines only.
 */
2127 musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2131 struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
2133 /* The driver might handle more features than the board; OK.
2134 * Fail when the board needs a feature that's not enabled.
2137 dev_err(dev, "no platform_data?\n");
2143 musb = allocate_instance(dev, plat->config, ctrl);
2149 spin_lock_init(&musb->lock);
2150 spin_lock_init(&musb->list_lock);
2151 musb->board_set_power = plat->set_power;
2152 musb->min_power = plat->min_power;
2153 musb->ops = plat->platform_ops;
2154 musb->port_mode = plat->mode;
2157 * Initialize the default IO functions. At least omap2430 needs
2158 * these early. We initialize the platform specific IO functions
2161 musb_readb = musb_default_readb;
2162 musb_writeb = musb_default_writeb;
2163 musb_readw = musb_default_readw;
2164 musb_writew = musb_default_writew;
2165 musb_readl = musb_default_readl;
2166 musb_writel = musb_default_writel;
2168 /* The musb_platform_init() call:
2169 * - adjusts musb->mregs
2170 * - sets the musb->isr
2171 * - may initialize an integrated transceiver
2172 * - initializes musb->xceiv, usually by otg_get_phy()
2173 * - stops powering VBUS
2175 * There are various transceiver configurations. Blackfin,
2176 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
2177 * external/discrete ones in various flavors (twl4030 family,
2178 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
2180 status = musb_platform_init(musb);
2189 if (musb->ops->quirks)
2190 musb->io.quirks = musb->ops->quirks;
2192 /* Most devices use indexed offset or flat offset */
2193 if (musb->io.quirks & MUSB_INDEXED_EP) {
2194 musb->io.ep_offset = musb_indexed_ep_offset;
2195 musb->io.ep_select = musb_indexed_ep_select;
2197 musb->io.ep_offset = musb_flat_ep_offset;
2198 musb->io.ep_select = musb_flat_ep_select;
2201 if (musb->io.quirks & MUSB_G_NO_SKB_RESERVE)
2202 musb->g.quirk_avoids_skb_reserve = 1;
2204 /* At least tusb6010 has its own offsets */
2205 if (musb->ops->ep_offset)
2206 musb->io.ep_offset = musb->ops->ep_offset;
2207 if (musb->ops->ep_select)
2208 musb->io.ep_select = musb->ops->ep_select;
/* glue may override the module-level fifo_mode default */
2210 if (musb->ops->fifo_mode)
2211 fifo_mode = musb->ops->fifo_mode;
2215 if (musb->ops->fifo_offset)
2216 musb->io.fifo_offset = musb->ops->fifo_offset;
2218 musb->io.fifo_offset = musb_default_fifo_offset;
2220 if (musb->ops->busctl_offset)
2221 musb->io.busctl_offset = musb->ops->busctl_offset;
2223 musb->io.busctl_offset = musb_default_busctl_offset;
/* per-glue register accessor overrides (e.g. for byte-swapped buses) */
2225 if (musb->ops->readb)
2226 musb_readb = musb->ops->readb;
2227 if (musb->ops->writeb)
2228 musb_writeb = musb->ops->writeb;
2229 if (musb->ops->readw)
2230 musb_readw = musb->ops->readw;
2231 if (musb->ops->writew)
2232 musb_writew = musb->ops->writew;
2233 if (musb->ops->readl)
2234 musb_readl = musb->ops->readl;
2235 if (musb->ops->writel)
2236 musb_writel = musb->ops->writel;
2238 #ifndef CONFIG_MUSB_PIO_ONLY
2239 if (!musb->ops->dma_init || !musb->ops->dma_exit) {
2240 dev_err(dev, "DMA controller not set\n");
2244 musb_dma_controller_create = musb->ops->dma_init;
2245 musb_dma_controller_destroy = musb->ops->dma_exit;
2248 if (musb->ops->read_fifo)
2249 musb->io.read_fifo = musb->ops->read_fifo;
2251 musb->io.read_fifo = musb_default_read_fifo;
2253 if (musb->ops->write_fifo)
2254 musb->io.write_fifo = musb->ops->write_fifo;
2256 musb->io.write_fifo = musb_default_write_fifo;
/* default ULPI access through MUSB's own ULPI viewport registers */
2258 if (!musb->xceiv->io_ops) {
2259 musb->xceiv->io_dev = musb->controller;
2260 musb->xceiv->io_priv = musb->mregs;
2261 musb->xceiv->io_ops = &musb_ulpi_access;
2264 if (musb->ops->phy_callback)
2265 musb_phy_callback = musb->ops->phy_callback;
2268 * We need musb_read/write functions initialized for PM.
2269 * Note that at least 2430 glue needs autosuspend delay
2270 * somewhere above 300 ms for the hardware to idle properly
2271 * after disconnecting the cable in host mode. Let's use
2272 * 500 ms for some margin.
2274 pm_runtime_use_autosuspend(musb->controller);
2275 pm_runtime_set_autosuspend_delay(musb->controller, 500);
2276 pm_runtime_enable(musb->controller);
2277 pm_runtime_get_sync(musb->controller);
2279 status = usb_phy_init(musb->xceiv);
2281 goto err_usb_phy_init;
2283 if (use_dma && dev->dma_mask) {
2284 musb->dma_controller =
2285 musb_dma_controller_create(musb, musb->mregs);
2286 if (IS_ERR(musb->dma_controller)) {
2287 status = PTR_ERR(musb->dma_controller);
2292 /* be sure interrupts are disabled before connecting ISR */
2293 musb_platform_disable(musb);
2294 musb_disable_interrupts(musb);
2295 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2297 /* Init IRQ workqueue before request_irq */
2298 INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
2299 INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
2300 INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
2302 /* setup musb parts of the core (especially endpoints) */
2303 status = musb_core_init(plat->config->multipoint
2304 ? MUSB_CONTROLLER_MHDRC
2305 : MUSB_CONTROLLER_HDRC, musb);
2309 timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
2311 /* attach to the IRQ */
2312 if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
2313 dev_err(dev, "request_irq %d failed!\n", nIrq);
2318 /* FIXME this handles wakeup irqs wrong */
2319 if (enable_irq_wake(nIrq) == 0) {
2321 device_init_wakeup(dev, 1);
2326 /* program PHY to use external vBus if required */
2327 if (plat->extvbus) {
2328 u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
2329 busctl |= MUSB_ULPI_USE_EXTVBUS;
2330 musb_write_ulpi_buscontrol(musb->mregs, busctl);
/* initial OTG role from the PHY's default_a hint */
2333 if (musb->xceiv->otg->default_a) {
2334 MUSB_HST_MODE(musb);
2335 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2337 MUSB_DEV_MODE(musb);
2338 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2341 switch (musb->port_mode) {
2342 case MUSB_PORT_MODE_HOST:
2343 status = musb_host_setup(musb, plat->power);
2346 status = musb_platform_set_mode(musb, MUSB_HOST);
2348 case MUSB_PORT_MODE_GADGET:
2349 status = musb_gadget_setup(musb);
2352 status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
2354 case MUSB_PORT_MODE_DUAL_ROLE:
2355 status = musb_host_setup(musb, plat->power);
2358 status = musb_gadget_setup(musb);
/* gadget setup failed: undo the host side before bailing */
2360 musb_host_cleanup(musb);
2363 status = musb_platform_set_mode(musb, MUSB_OTG);
2366 dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
2373 status = musb_init_debugfs(musb);
2377 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2381 musb->is_initialized = 1;
2382 pm_runtime_mark_last_busy(musb->controller);
2383 pm_runtime_put_autosuspend(musb->controller);
/* error unwind (labels missing from this extract) -- reverse order of
 * the setup above */
2388 musb_exit_debugfs(musb);
2391 musb_gadget_cleanup(musb);
2392 musb_host_cleanup(musb);
2395 cancel_delayed_work_sync(&musb->irq_work);
2396 cancel_delayed_work_sync(&musb->finish_resume_work);
2397 cancel_delayed_work_sync(&musb->deassert_reset_work);
2398 if (musb->dma_controller)
2399 musb_dma_controller_destroy(musb->dma_controller);
2402 usb_phy_shutdown(musb->xceiv);
2405 pm_runtime_dont_use_autosuspend(musb->controller);
2406 pm_runtime_put_sync(musb->controller);
2407 pm_runtime_disable(musb->controller);
2411 device_init_wakeup(dev, 0);
2412 musb_platform_exit(musb);
2415 if (status != -EPROBE_DEFER)
2416 dev_err(musb->controller,
2417 "%s failed with status %d\n", __func__, status);
2427 /*-------------------------------------------------------------------------*/
2429 /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2430 * bridge to a platform device; this driver then suffices.
/*
 * Platform-device probe: map the first MEM resource, fetch the "mc"
 * IRQ and hand off to musb_init_controller().  NOTE(review): the
 * IRQ-validity check and base declaration are among the lines missing
 * from this extract.
 */
2432 static int musb_probe(struct platform_device *pdev)
2434 struct device *dev = &pdev->dev;
2435 int irq = platform_get_irq_byname(pdev, "mc");
2436 struct resource *iomem;
2442 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2443 base = devm_ioremap_resource(dev, iomem);
2445 return PTR_ERR(base);
2447 return musb_init_controller(dev, irq, base);
/*
 * Platform-device remove: tear down in reverse of
 * musb_init_controller() -- debugfs, delayed work, host/gadget,
 * interrupts, runtime PM, glue, DMA, PHY.  NOTE(review): the final
 * musb_free()/return lines are missing from this extract.
 */
2450 static int musb_remove(struct platform_device *pdev)
2452 struct device *dev = &pdev->dev;
2453 struct musb *musb = dev_to_musb(dev);
2454 unsigned long flags;
2456 /* this gets called on rmmod.
2457 * - Host mode: host may still be active
2458 * - Peripheral mode: peripheral is deactivated (or never-activated)
2459 * - OTG mode: both roles are deactivated (or never-activated)
2461 musb_exit_debugfs(musb);
2463 cancel_delayed_work_sync(&musb->irq_work);
2464 cancel_delayed_work_sync(&musb->finish_resume_work);
2465 cancel_delayed_work_sync(&musb->deassert_reset_work);
/* keep hardware powered while we touch registers below */
2466 pm_runtime_get_sync(musb->controller);
2467 musb_host_cleanup(musb);
2468 musb_gadget_cleanup(musb);
2470 musb_platform_disable(musb);
2471 spin_lock_irqsave(&musb->lock, flags);
2472 musb_disable_interrupts(musb);
2473 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2474 spin_unlock_irqrestore(&musb->lock, flags);
2476 pm_runtime_dont_use_autosuspend(musb->controller);
2477 pm_runtime_put_sync(musb->controller);
2478 pm_runtime_disable(musb->controller);
2479 musb_platform_exit(musb);
2480 musb_phy_callback = NULL;
2481 if (musb->dma_controller)
2482 musb_dma_controller_destroy(musb->dma_controller);
2483 usb_phy_shutdown(musb->xceiv);
2485 device_init_wakeup(dev, 0);
/*
 * musb_save_context() - snapshot all MUSB registers before a suspend
 * or power-down, into musb->context for musb_restore_context().
 *
 * Saves the common (non-indexed) registers first, then iterates the
 * endpoints: writing MUSB_INDEX selects endpoint i so the indexed
 * per-endpoint registers (MAXP/CSR, FIFO sizing when dyn_fifo,
 * TYPE/INTERVAL, and the multipoint host-side target/hub addressing
 * registers) can be read through the same window.
 *
 * NOTE(review): declarations/braces are elided in this extract (e.g.
 * loop index 'i', 'void __iomem *epio', skip-NULL-regs checks around
 * original lines 2509-2515) -- confirm against the full file.
 */
2491 static void musb_save_context(struct musb *musb)
2494 void __iomem *musb_base = musb->mregs;
/* common registers, valid regardless of the selected endpoint index */
2497 musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
2498 musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2499 musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
2500 musb->context.power = musb_readb(musb_base, MUSB_POWER);
2501 musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2502 musb->context.index = musb_readb(musb_base, MUSB_INDEX);
2503 musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2505 for (i = 0; i < musb->config->num_eps; ++i) {
2506 struct musb_hw_ep *hw_ep;
2508 hw_ep = &musb->endpoints[i];
/* select endpoint i; subsequent indexed reads target that endpoint */
2516 musb_writeb(musb_base, MUSB_INDEX, i);
2517 musb->context.index_regs[i].txmaxp =
2518 musb_readw(epio, MUSB_TXMAXP);
2519 musb->context.index_regs[i].txcsr =
2520 musb_readw(epio, MUSB_TXCSR);
2521 musb->context.index_regs[i].rxmaxp =
2522 musb_readw(epio, MUSB_RXMAXP);
2523 musb->context.index_regs[i].rxcsr =
2524 musb_readw(epio, MUSB_RXCSR);
/* dynamic-FIFO parts also lose their FIFO layout across power-down */
2526 if (musb->dyn_fifo) {
2527 musb->context.index_regs[i].txfifoadd =
2528 musb_read_txfifoadd(musb_base);
2529 musb->context.index_regs[i].rxfifoadd =
2530 musb_read_rxfifoadd(musb_base);
2531 musb->context.index_regs[i].txfifosz =
2532 musb_read_txfifosz(musb_base);
2533 musb->context.index_regs[i].rxfifosz =
2534 musb_read_rxfifosz(musb_base);
2537 musb->context.index_regs[i].txtype =
2538 musb_readb(epio, MUSB_TXTYPE);
2539 musb->context.index_regs[i].txinterval =
2540 musb_readb(epio, MUSB_TXINTERVAL);
2541 musb->context.index_regs[i].rxtype =
2542 musb_readb(epio, MUSB_RXTYPE);
2543 musb->context.index_regs[i].rxinterval =
2544 musb_readb(epio, MUSB_RXINTERVAL);
/* multipoint host addressing: per-endpoint target function/hub regs */
2546 musb->context.index_regs[i].txfunaddr =
2547 musb_read_txfunaddr(musb, i);
2548 musb->context.index_regs[i].txhubaddr =
2549 musb_read_txhubaddr(musb, i);
2550 musb->context.index_regs[i].txhubport =
2551 musb_read_txhubport(musb, i);
2553 musb->context.index_regs[i].rxfunaddr =
2554 musb_read_rxfunaddr(musb, i);
2555 musb->context.index_regs[i].rxhubaddr =
2556 musb_read_rxhubaddr(musb, i);
2557 musb->context.index_regs[i].rxhubport =
2558 musb_read_rxhubport(musb, i);
/*
 * musb_restore_context() - write back the register snapshot taken by
 * musb_save_context(), after resume or power-up.
 *
 * Mirrors the save path: common registers first, then per-endpoint
 * indexed registers via MUSB_INDEX selection.  Two subtleties:
 *  - the live SUSPENDM/RESUME bits of POWER are preserved rather than
 *    overwritten from the snapshot;
 *  - DEVCTL is only rewritten if a session was active at save time,
 *    to avoid spuriously starting one.
 *
 * NOTE(review): locals ('i', 'power', 'epio') and loop braces are
 * elided in this extract -- confirm against the full file.
 */
2562 static void musb_restore_context(struct musb *musb)
2565 void __iomem *musb_base = musb->mregs;
2569 musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
2570 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
2571 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
2573 /* Don't affect SUSPENDM/RESUME bits in POWER reg */
2574 power = musb_readb(musb_base, MUSB_POWER);
2575 power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
2576 musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
2577 power |= musb->context.power;
2578 musb_writeb(musb_base, MUSB_POWER, power);
/* interrupt enables come from the cached shadow copies, not context */
2580 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
2581 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
2582 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
/* only restore DEVCTL when a session was in progress at save time */
2583 if (musb->context.devctl & MUSB_DEVCTL_SESSION)
2584 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2586 for (i = 0; i < musb->config->num_eps; ++i) {
2587 struct musb_hw_ep *hw_ep;
2589 hw_ep = &musb->endpoints[i];
/* select endpoint i for the indexed register writes below */
2597 musb_writeb(musb_base, MUSB_INDEX, i);
2598 musb_writew(epio, MUSB_TXMAXP,
2599 musb->context.index_regs[i].txmaxp);
2600 musb_writew(epio, MUSB_TXCSR,
2601 musb->context.index_regs[i].txcsr);
2602 musb_writew(epio, MUSB_RXMAXP,
2603 musb->context.index_regs[i].rxmaxp);
2604 musb_writew(epio, MUSB_RXCSR,
2605 musb->context.index_regs[i].rxcsr);
/* rebuild the dynamic FIFO layout for this endpoint */
2607 if (musb->dyn_fifo) {
2608 musb_write_txfifosz(musb_base,
2609 musb->context.index_regs[i].txfifosz);
2610 musb_write_rxfifosz(musb_base,
2611 musb->context.index_regs[i].rxfifosz);
2612 musb_write_txfifoadd(musb_base,
2613 musb->context.index_regs[i].txfifoadd);
2614 musb_write_rxfifoadd(musb_base,
2615 musb->context.index_regs[i].rxfifoadd);
2618 musb_writeb(epio, MUSB_TXTYPE,
2619 musb->context.index_regs[i].txtype);
2620 musb_writeb(epio, MUSB_TXINTERVAL,
2621 musb->context.index_regs[i].txinterval);
2622 musb_writeb(epio, MUSB_RXTYPE,
2623 musb->context.index_regs[i].rxtype);
2624 musb_writeb(epio, MUSB_RXINTERVAL,
2626 musb->context.index_regs[i].rxinterval);
/* multipoint host addressing: per-endpoint target function/hub regs */
2627 musb_write_txfunaddr(musb, i,
2628 musb->context.index_regs[i].txfunaddr);
2629 musb_write_txhubaddr(musb, i,
2630 musb->context.index_regs[i].txhubaddr);
2631 musb_write_txhubport(musb, i,
2632 musb->context.index_regs[i].txhubport);
2634 musb_write_rxfunaddr(musb, i,
2635 musb->context.index_regs[i].rxfunaddr);
2636 musb_write_rxhubaddr(musb, i,
2637 musb->context.index_regs[i].rxhubaddr);
2638 musb_write_rxhubport(musb, i,
2639 musb->context.index_regs[i].rxhubport);
/* restore whichever endpoint index was selected at save time */
2641 musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
/*
 * musb_suspend() - system-sleep suspend callback (dev_pm_ops.suspend).
 *
 * Runtime-resumes the device so registers are accessible, quiesces
 * the hardware (platform disable, IRQs masked), drains irq_work with
 * the flush_irq_work flag set so re-queued work is also flushed, then
 * saves the register context under the lock.  DEVCTL is cleared --
 * ending any session -- unless the glue sets MUSB_PRESERVE_SESSION.
 *
 * NOTE(review): locals ('ret'), the error-return after
 * pm_runtime_put_noidle(), and the trailing 'return 0;' are elided in
 * this extract -- confirm against the full file.
 */
2644 static int musb_suspend(struct device *dev)
2646 struct musb *musb = dev_to_musb(dev);
2647 unsigned long flags;
2650 ret = pm_runtime_get_sync(dev);
/* get_sync failed: drop the usage count without idling the device */
2652 pm_runtime_put_noidle(dev);
2656 musb_platform_disable(musb);
2657 musb_disable_interrupts(musb);
/* flag tells the irq_work handler we are flushing, so it won't rearm */
2659 musb->flush_irq_work = true;
2660 while (flush_delayed_work(&musb->irq_work))
2662 musb->flush_irq_work = false;
/* some glue layers need the session kept alive across suspend */
2664 if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
2665 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2667 WARN_ON(!list_empty(&musb->pending_list));
2669 spin_lock_irqsave(&musb->lock, flags);
2671 if (is_peripheral_active(musb)) {
2672 /* FIXME force disconnect unless we know USB will wake
2673 * the system up quickly enough to respond ...
2675 } else if (is_host_active(musb)) {
2676 /* we know all the children are suspended; sometimes
2677 * they will even be wakeup-enabled.
2681 musb_save_context(musb);
2683 spin_unlock_irqrestore(&musb->lock, flags);
/*
 * musb_resume() - system-sleep resume callback (dev_pm_ops.resume).
 *
 * Unconditionally restores the saved register context (needed by the
 * DSPS glue; harmless where registers survived), then compares the
 * connected-device-type bits of DEVCTL against the saved snapshot: if
 * they differ, whatever was attached before suspend is gone, so the
 * cached root-port status is reset.  Finishes by running the queued
 * resume work under the lock and re-arming runtime autosuspend.
 *
 * NOTE(review): locals ('devctl', 'mask', 'error') and the trailing
 * 'return 0;' are elided in this extract -- confirm against the full
 * file.
 */
2687 static int musb_resume(struct device *dev)
2689 struct musb *musb = dev_to_musb(dev);
2690 unsigned long flags;
2696 * For static cmos like DaVinci, register values were preserved
2697 * unless for some reason the whole soc powered down or the USB
2698 * module got reset through the PSC (vs just being disabled).
2700 * For the DSPS glue layer though, a full register restore has to
2701 * be done. As it shouldn't harm other platforms, we do it
2705 musb_restore_context(musb);
2707 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
/* device-type bits changed across suspend => peer is gone; reset port */
2708 mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
2709 if ((devctl & mask) != (musb->context.devctl & mask))
2710 musb->port1_status = 0;
2714 spin_lock_irqsave(&musb->lock, flags);
2715 error = musb_run_resume_work(musb);
2717 dev_err(musb->controller, "resume work failed with %i\n",
2719 spin_unlock_irqrestore(&musb->lock, flags);
/* allow the device to autosuspend again */
2721 pm_runtime_mark_last_busy(dev);
2722 pm_runtime_put_autosuspend(dev);
/*
 * musb_runtime_suspend() - runtime-PM suspend callback.
 *
 * Saves the register context (the glue may power-gate the module) and
 * marks the controller runtime-suspended so other paths can tell that
 * registers are not currently accessible.
 *
 * NOTE(review): the trailing 'return 0;' is elided in this extract.
 */
2727 static int musb_runtime_suspend(struct device *dev)
2729 struct musb *musb = dev_to_musb(dev);
2731 musb_save_context(musb);
2732 musb->is_runtime_suspended = 1;
/*
 * musb_runtime_resume() - runtime-PM resume callback.
 *
 * Skips the register restore on the very first runtime resume during
 * probe (is_initialized not yet set): the context has never been
 * saved and the structures it uses are not ready, but the clock must
 * still be enabled via pm_runtime_get_sync() for register access.
 * Otherwise restores the context, runs the queued resume work under
 * the lock, and clears the runtime-suspended flag.
 *
 * NOTE(review): the early 'return 0;' after the is_initialized check,
 * the 'error' local, and the trailing 'return 0;' are elided in this
 * extract -- confirm against the full file.
 */
2737 static int musb_runtime_resume(struct device *dev)
2739 struct musb *musb = dev_to_musb(dev);
2740 unsigned long flags;
2744 * When pm_runtime_get_sync called for the first time in driver
2745 * init, some of the structure is still not initialized which is
2746 * used in restore function. But clock needs to be
2747 * enabled before any register access, so
2748 * pm_runtime_get_sync has to be called.
2749 * Also context restore without save does not make
2752 if (!musb->is_initialized)
2755 musb_restore_context(musb);
2757 spin_lock_irqsave(&musb->lock, flags);
2758 error = musb_run_resume_work(musb);
2760 dev_err(musb->controller, "resume work failed with %i\n",
2762 musb->is_runtime_suspended = 0;
2763 spin_unlock_irqrestore(&musb->lock, flags);
/*
 * System-sleep and runtime-PM operations for the core platform device.
 *
 * NOTE(review): the '#ifdef CONFIG_PM' / '#else' / '#endif' scaffold
 * around these two MUSB_DEV_PM_OPS definitions is elided in this
 * extract (numbering gaps at 2774/2776/2778): with PM enabled the
 * macro points at musb_dev_pm_ops, otherwise it is NULL -- confirm
 * against the full file.
 */
2768 static const struct dev_pm_ops musb_dev_pm_ops = {
2769 .suspend = musb_suspend,
2770 .resume = musb_resume,
2771 .runtime_suspend = musb_runtime_suspend,
2772 .runtime_resume = musb_runtime_resume,
2775 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2777 #define MUSB_DEV_PM_OPS NULL
/*
 * Platform driver glue: binds musb_probe/musb_remove and the PM ops
 * to the "musb-hdrc" platform device created by the bus glue layers.
 * module_platform_driver() expands to the module init/exit
 * registration boilerplate.
 *
 * NOTE(review): the '.driver = {' sub-struct open/close lines
 * (original 2781, 2785) are elided in this extract -- .name/.bus/.pm
 * belong to the embedded 'struct device_driver'.
 */
2780 static struct platform_driver musb_driver = {
2782 .name = (char *)musb_driver_name,
2783 .bus = &platform_bus_type,
2784 .pm = MUSB_DEV_PM_OPS,
2786 .probe = musb_probe,
2787 .remove = musb_remove,
2790 module_platform_driver(musb_driver);