X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=drivers%2Fusb%2Fgadget%2Fci_udc.c;h=2b9dbf06b991d527965a91e0694cc18c805725fc;hb=83d290c56fab2d38cd1ab4c4cc7099559c1d5046;hp=14b1e9b8bf1117037d4b21a41c0fd76a50f1a171;hpb=e7538fee991308c16f76f7eca2517089f2a30ff4;p=oweals%2Fu-boot.git diff --git a/drivers/usb/gadget/ci_udc.c b/drivers/usb/gadget/ci_udc.c index 14b1e9b8bf..2b9dbf06b9 100644 --- a/drivers/usb/gadget/ci_udc.c +++ b/drivers/usb/gadget/ci_udc.c @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2011, Marvell Semiconductor Inc. * Lei Wen * - * SPDX-License-Identifier: GPL-2.0+ - * * Back ported to the 8xx platform (from the 8260 platform) by * Murray.Jensen@cmst.csiro.au, 27-Jan-01. */ @@ -14,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -34,6 +33,24 @@ #error This driver can not work on systems with caches longer than 128b #endif +/* + * Every QTD must be individually aligned, since we can program any + * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN, + * and the USB HW requires 32-byte alignment. Align to both: + */ +#define ILIST_ALIGN roundup(ARCH_DMA_MINALIGN, 32) +/* Each QTD is this size */ +#define ILIST_ENT_RAW_SZ sizeof(struct ept_queue_item) +/* + * Align the size of the QTD too, so we can add this value to each + * QTD's address to get another aligned address. + */ +#define ILIST_ENT_SZ roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN) +/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */ +#define ILIST_SZ (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ) + +#define EP_MAX_LENGTH_TRANSFER 0x4000 + #ifndef DEBUG #define DBG(x...) do {} while (0) #else @@ -56,14 +73,7 @@ static const char *reqname(unsigned r) } #endif -static struct usb_endpoint_descriptor ep0_out_desc = { - .bLength = sizeof(struct usb_endpoint_descriptor), - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = 0, - .bmAttributes = USB_ENDPOINT_XFER_CONTROL, -}; - -static struct usb_endpoint_descriptor ep0_in_desc = { +static struct usb_endpoint_descriptor ep0_desc = { .bLength = sizeof(struct usb_endpoint_descriptor), .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, @@ -76,6 +86,7 @@ static int ci_ep_enable(struct usb_ep *ep, static int ci_ep_disable(struct usb_ep *ep); static int ci_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags); +static int ci_ep_dequeue(struct usb_ep *ep, struct usb_request *req); static struct usb_request * ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags); static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req); @@ -88,18 +99,34 @@ static struct usb_ep_ops ci_ep_ops = { .enable = ci_ep_enable, .disable = ci_ep_disable, .queue = ci_ep_queue, + .dequeue = ci_ep_dequeue, .alloc_request = ci_ep_alloc_request, .free_request = ci_ep_free_request, }; /* Init values for USB endpoints. 
*/ -static const struct usb_ep ci_ep_init[2] = { +static const struct usb_ep ci_ep_init[5] = { [0] = { /* EP 0 */ .maxpacket = 64, .name = "ep0", .ops = &ci_ep_ops, }, - [1] = { /* EP 1..n */ + [1] = { + .maxpacket = 512, + .name = "ep1in-bulk", + .ops = &ci_ep_ops, + }, + [2] = { + .maxpacket = 512, + .name = "ep2out-bulk", + .ops = &ci_ep_ops, + }, + [3] = { + .maxpacket = 512, + .name = "ep3in-int", + .ops = &ci_ep_ops, + }, + [4] = { .maxpacket = 512, .name = "ep-", .ops = &ci_ep_ops, @@ -137,7 +164,9 @@ static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) */ static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) { - return controller.items[(ep_num * 2) + dir_in]; + int index = (ep_num * 2) + dir_in; + uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); + return (struct ept_queue_item *)imem; } /** @@ -149,8 +178,8 @@ static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) static void ci_flush_qh(int ep_num) { struct ept_queue_head *head = ci_get_qh(ep_num, 0); - const uint32_t start = (uint32_t)head; - const uint32_t end = start + 2 * sizeof(*head); + const unsigned long start = (unsigned long)head; + const unsigned long end = start + 2 * sizeof(*head); flush_dcache_range(start, end); } @@ -164,8 +193,8 @@ static void ci_flush_qh(int ep_num) static void ci_invalidate_qh(int ep_num) { struct ept_queue_head *head = ci_get_qh(ep_num, 0); - uint32_t start = (uint32_t)head; - uint32_t end = start + 2 * sizeof(*head); + unsigned long start = (unsigned long)head; + unsigned long end = start + 2 * sizeof(*head); invalidate_dcache_range(start, end); } @@ -179,13 +208,25 @@ static void ci_invalidate_qh(int ep_num) static void ci_flush_qtd(int ep_num) { struct ept_queue_item *item = ci_get_qtd(ep_num, 0); - const uint32_t start = (uint32_t)item; - const uint32_t end_raw = start + 2 * sizeof(*item); - const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN); + const unsigned long start = (unsigned long)item; + const unsigned long end = start + 2 * ILIST_ENT_SZ; flush_dcache_range(start, end); } +/** + * ci_flush_td - flush cache over queue item + * @td: td pointer + * + * This function flushes cache for particular transfer descriptor. + */ +static void ci_flush_td(struct ept_queue_item *td) +{ + const unsigned long start = (unsigned long)td; + const unsigned long end = (unsigned long)td + ILIST_ENT_SZ; + flush_dcache_range(start, end); +} + /** * ci_invalidate_qtd - invalidate cache over queue item * @ep_num: Endpoint number @@ -195,10 +236,22 @@ static void ci_flush_qtd(int ep_num) static void ci_invalidate_qtd(int ep_num) { struct ept_queue_item *item = ci_get_qtd(ep_num, 0); - const uint32_t start = (uint32_t)item; - const uint32_t end_raw = start + 2 * sizeof(*item); - const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN); + const unsigned long start = (unsigned long)item; + const unsigned long end = start + 2 * ILIST_ENT_SZ; + + invalidate_dcache_range(start, end); +} +/** + * ci_invalidate_td - invalidate cache over queue item + * @td: td pointer + * + * This function invalidates cache for particular transfer descriptor. 
+ */ +static void ci_invalidate_td(struct ept_queue_item *td) +{ + const unsigned long start = (unsigned long)td; + const unsigned long end = start + ILIST_ENT_SZ; invalidate_dcache_range(start, end); } @@ -206,12 +259,45 @@ static struct usb_request * ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) { struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); - return &ci_ep->req; + int num = -1; + struct ci_req *ci_req; + + if (ci_ep->desc) + num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; + + if (num == 0 && controller.ep0_req) + return &controller.ep0_req->req; + + ci_req = calloc(1, sizeof(*ci_req)); + if (!ci_req) + return NULL; + + INIT_LIST_HEAD(&ci_req->queue); + + if (num == 0) + controller.ep0_req = ci_req; + + return &ci_req->req; } -static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req) +static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) { - return; + struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); + struct ci_req *ci_req = container_of(req, struct ci_req, req); + int num = -1; + + if (ci_ep->desc) + num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; + + if (num == 0) { + if (!controller.ep0_req) + return; + controller.ep0_req = 0; + } + + if (ci_req->b_buf) + free(ci_req->b_buf); + free(ci_req); } static void ep_enable(int num, int in, int maxpacket) @@ -267,96 +353,167 @@ static int ci_ep_disable(struct usb_ep *ep) return 0; } -static int ci_bounce(struct ci_ep *ep, int in) +static int ci_bounce(struct ci_req *ci_req, int in) { - uint32_t addr = (uint32_t)ep->req.buf; - uint32_t ba; + struct usb_request *req = &ci_req->req; + unsigned long addr = (unsigned long)req->buf; + unsigned long hwaddr; + uint32_t aligned_used_len; /* Input buffer address is not aligned. */ if (addr & (ARCH_DMA_MINALIGN - 1)) goto align; /* Input buffer length is not aligned. */ - if (ep->req.length & (ARCH_DMA_MINALIGN - 1)) + if (req->length & (ARCH_DMA_MINALIGN - 1)) goto align; /* The buffer is well aligned, only flush cache. */ - ep->b_len = ep->req.length; - ep->b_buf = ep->req.buf; + ci_req->hw_len = req->length; + ci_req->hw_buf = req->buf; goto flush; align: - /* Use internal buffer for small payloads. 
*/ - if (ep->req.length <= 64) { - ep->b_len = 64; - ep->b_buf = ep->b_fast; - } else { - ep->b_len = roundup(ep->req.length, ARCH_DMA_MINALIGN); - ep->b_buf = memalign(ARCH_DMA_MINALIGN, ep->b_len); - if (!ep->b_buf) + if (ci_req->b_buf && req->length > ci_req->b_len) { + free(ci_req->b_buf); + ci_req->b_buf = 0; + } + if (!ci_req->b_buf) { + ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); + ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); + if (!ci_req->b_buf) return -ENOMEM; } + ci_req->hw_len = ci_req->b_len; + ci_req->hw_buf = ci_req->b_buf; + if (in) - memcpy(ep->b_buf, ep->req.buf, ep->req.length); + memcpy(ci_req->hw_buf, req->buf, req->length); flush: - ba = (uint32_t)ep->b_buf; - flush_dcache_range(ba, ba + ep->b_len); + hwaddr = (unsigned long)ci_req->hw_buf; + aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); + flush_dcache_range(hwaddr, hwaddr + aligned_used_len); return 0; } -static void ci_debounce(struct ci_ep *ep, int in) +static void ci_debounce(struct ci_req *ci_req, int in) { - uint32_t addr = (uint32_t)ep->req.buf; - uint32_t ba = (uint32_t)ep->b_buf; + struct usb_request *req = &ci_req->req; + unsigned long addr = (unsigned long)req->buf; + unsigned long hwaddr = (unsigned long)ci_req->hw_buf; + uint32_t aligned_used_len; - if (in) { - if (addr == ba) - return; /* not a bounce */ - goto free; - } - invalidate_dcache_range(ba, ba + ep->b_len); + if (in) + return; + + aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); + invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); - if (addr == ba) - return; /* not a bounce */ + if (addr == hwaddr) + return; /* not a bounce */ - memcpy(ep->req.buf, ep->b_buf, ep->req.length); -free: - /* Large payloads use allocated buffer, free it. */ - if (ep->b_buf != ep->b_fast) - free(ep->b_buf); + memcpy(req->buf, ci_req->hw_buf, req->actual); } -static int ci_ep_queue(struct usb_ep *ep, - struct usb_request *req, gfp_t gfp_flags) +static void ci_ep_submit_next_request(struct ci_ep *ci_ep) { - struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; struct ept_queue_item *item; struct ept_queue_head *head; - int bit, num, len, in, ret; + int bit, num, len, in; + struct ci_req *ci_req; + u8 *buf; + uint32_t len_left, len_this_dtd; + struct ept_queue_item *dtd, *qtd; + + ci_ep->req_primed = true; + num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; item = ci_get_qtd(num, in); head = ci_get_qh(num, in); - len = req->length; - ret = ci_bounce(ci_ep, in); - if (ret) - return ret; + ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); + len = ci_req->req.length; + + head->next = (unsigned long)item; + head->info = 0; + + ci_req->dtd_count = 0; + buf = ci_req->hw_buf; + len_left = len; + dtd = item; + + do { + len_this_dtd = min(len_left, (unsigned)EP_MAX_LENGTH_TRANSFER); + + dtd->info = INFO_BYTES(len_this_dtd) | INFO_ACTIVE; + dtd->page0 = (unsigned long)buf; + dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000; + dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000; + dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000; + dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000; + + len_left -= len_this_dtd; + buf += len_this_dtd; + + if (len_left) { + qtd = (struct ept_queue_item *) + memalign(ILIST_ALIGN, ILIST_ENT_SZ); + dtd->next = (unsigned long)qtd; + dtd = qtd; + memset(dtd, 0, ILIST_ENT_SZ); + } + + ci_req->dtd_count++; + } while 
(len_left); + + item = dtd; + /* + * When sending the data for an IN transaction, the attached host + * knows that all data for the IN is sent when one of the following + * occurs: + * a) A zero-length packet is transmitted. + * b) A packet with length that isn't an exact multiple of the ep's + * maxpacket is transmitted. + * c) Enough data is sent to exactly fill the host's maximum expected + * IN transaction size. + * + * One of these conditions MUST apply at the end of an IN transaction, + * or the transaction will not be considered complete by the host. If + * none of (a)..(c) already applies, then we must force (a) to apply + * by explicitly sending an extra zero-length packet. + */ + /* IN !a !b !c */ + if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) { + /* + * Each endpoint has 2 items allocated, even though typically + * only 1 is used at a time since either an IN or an OUT but + * not both is queued. For an IN transaction, item currently + * points at the second of these items, so we know that we + * can use the other to transmit the extra zero-length packet. + */ + struct ept_queue_item *other_item = ci_get_qtd(num, 0); + item->next = (unsigned long)other_item; + item = other_item; + item->info = INFO_ACTIVE; + } item->next = TERMINATE; - item->info = INFO_BYTES(len) | INFO_IOC | INFO_ACTIVE; - item->page0 = (uint32_t)ci_ep->b_buf; - item->page1 = ((uint32_t)ci_ep->b_buf & 0xfffff000) + 0x1000; + item->info |= INFO_IOC; + ci_flush_qtd(num); - head->next = (unsigned) item; - head->info = 0; + item = (struct ept_queue_item *)(unsigned long)head->next; + while (item->next != TERMINATE) { + ci_flush_td((struct ept_queue_item *)(unsigned long)item->next); + item = (struct ept_queue_item *)(unsigned long)item->next; + } - DBG("ept%d %s queue len %x, buffer %p\n", - num, in ? "in" : "out", len, ci_ep->b_buf); + DBG("ept%d %s queue len %x, req %p, buffer %p\n", + num, in ? "in" : "out", len, ci_req, ci_req->hw_buf); ci_flush_qh(num); if (in) @@ -365,36 +522,134 @@ static int ci_ep_queue(struct usb_ep *ep, bit = EPT_RX(num); writel(bit, &udc->epprime); +} + +static int ci_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct ci_ep *ci_ep = container_of(_ep, struct ci_ep, ep); + struct ci_req *ci_req; + + list_for_each_entry(ci_req, &ci_ep->queue, queue) { + if (&ci_req->req == _req) + break; + } + + if (&ci_req->req != _req) + return -EINVAL; + + list_del_init(&ci_req->queue); + + if (ci_req->req.status == -EINPROGRESS) { + ci_req->req.status = -ECONNRESET; + if (ci_req->req.complete) + ci_req->req.complete(_ep, _req); + } return 0; } -static void handle_ep_complete(struct ci_ep *ep) +static int ci_ep_queue(struct usb_ep *ep, + struct usb_request *req, gfp_t gfp_flags) { - struct ept_queue_item *item; - int num, in, len; - num = ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; - in = (ep->desc->bEndpointAddress & USB_DIR_IN) != 0; - if (num == 0) - ep->desc = &ep0_out_desc; + struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); + struct ci_req *ci_req = container_of(req, struct ci_req, req); + int in, ret; + int __maybe_unused num; + + num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; + in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; + + if (!num && ci_ep->req_primed) { + /* + * The flipping of ep0 between IN and OUT relies on + * ci_ep_queue consuming the current IN/OUT setting + * immediately. 
If this is deferred to a later point when the + * req is pulled out of ci_req->queue, then the IN/OUT setting + * may have been changed since the req was queued, and state + * will get out of sync. This condition doesn't occur today, + * but could if bugs were introduced later, and this error + * check will save a lot of debugging time. + */ + printf("%s: ep0 transaction already in progress\n", __func__); + return -EPROTO; + } + + ret = ci_bounce(ci_req, in); + if (ret) + return ret; + + DBG("ept%d %s pre-queue req %p, buffer %p\n", + num, in ? "in" : "out", ci_req, ci_req->hw_buf); + list_add_tail(&ci_req->queue, &ci_ep->queue); + + if (!ci_ep->req_primed) + ci_ep_submit_next_request(ci_ep); + + return 0; +} + +static void flip_ep0_direction(void) +{ + if (ep0_desc.bEndpointAddress == USB_DIR_IN) { + DBG("%s: Flipping ep0 to OUT\n", __func__); + ep0_desc.bEndpointAddress = 0; + } else { + DBG("%s: Flipping ep0 to IN\n", __func__); + ep0_desc.bEndpointAddress = USB_DIR_IN; + } +} + +static void handle_ep_complete(struct ci_ep *ci_ep) +{ + struct ept_queue_item *item, *next_td; + int num, in, len, j; + struct ci_req *ci_req; + + num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; + in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; item = ci_get_qtd(num, in); ci_invalidate_qtd(num); + ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); + + next_td = item; + len = 0; + for (j = 0; j < ci_req->dtd_count; j++) { + ci_invalidate_td(next_td); + item = next_td; + len += (item->info >> 16) & 0x7fff; + if (item->info & 0xff) + printf("EP%d/%s FAIL info=%x pg0=%x\n", + num, in ? "in" : "out", item->info, item->page0); + if (j != ci_req->dtd_count - 1) + next_td = (struct ept_queue_item *)(unsigned long) + item->next; + if (j != 0) + free(item); + } - if (item->info & 0xff) - printf("EP%d/%s FAIL info=%x pg0=%x\n", - num, in ? "in" : "out", item->info, item->page0); + list_del_init(&ci_req->queue); + ci_ep->req_primed = false; - len = (item->info >> 16) & 0x7fff; - ep->req.length -= len; - ci_debounce(ep, in); + if (!list_empty(&ci_ep->queue)) + ci_ep_submit_next_request(ci_ep); - DBG("ept%d %s complete %x\n", - num, in ? "in" : "out", len); - ep->req.complete(&ep->ep, &ep->req); - if (num == 0) { - ep->req.length = 0; - usb_ep_queue(&ep->ep, &ep->req, 0); - ep->desc = &ep0_in_desc; + ci_req->req.actual = ci_req->req.length - len; + ci_debounce(ci_req, in); + + DBG("ept%d %s req %p, complete %x\n", + num, in ? "in" : "out", ci_req, len); + if (num != 0 || controller.ep0_data_phase) + ci_req->req.complete(&ci_ep->ep, &ci_req->req); + if (num == 0 && controller.ep0_data_phase) { + /* + * Data Stage is complete, so flip ep0 dir for Status Stage, + * which always transfers a packet in the opposite direction. 
+ */ + DBG("%s: flip ep0 dir for Status Stage\n", __func__); + flip_ep0_direction(); + controller.ep0_data_phase = false; + ci_req->req.length = 0; + usb_ep_queue(&ci_ep->ep, &ci_req->req, 0); } } @@ -402,20 +657,50 @@ static void handle_ep_complete(struct ci_ep *ep) static void handle_setup(void) { - struct usb_request *req = &controller.ep[0].req; + struct ci_ep *ci_ep = &controller.ep[0]; + struct ci_req *ci_req; + struct usb_request *req; struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; struct ept_queue_head *head; struct usb_ctrlrequest r; int status = 0; int num, in, _num, _in, i; char *buf; + + ci_req = controller.ep0_req; + req = &ci_req->req; head = ci_get_qh(0, 0); /* EP0 OUT */ ci_invalidate_qh(0); memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest)); +#ifdef CONFIG_CI_UDC_HAS_HOSTPC + writel(EPT_RX(0), &udc->epsetupstat); +#else writel(EPT_RX(0), &udc->epstat); - DBG("handle setup %s, %x, %x index %x value %x\n", reqname(r.bRequest), - r.bRequestType, r.bRequest, r.wIndex, r.wValue); +#endif + DBG("handle setup %s, %x, %x index %x value %x length %x\n", + reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex, + r.wValue, r.wLength); + + /* Set EP0 dir for Data Stage based on Setup Stage data */ + if (r.bRequestType & USB_DIR_IN) { + DBG("%s: Set ep0 to IN for Data Stage\n", __func__); + ep0_desc.bEndpointAddress = USB_DIR_IN; + } else { + DBG("%s: Set ep0 to OUT for Data Stage\n", __func__); + ep0_desc.bEndpointAddress = 0; + } + if (r.wLength) { + controller.ep0_data_phase = true; + } else { + /* 0 length -> no Data Stage. Flip dir for Status Stage */ + DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__); + flip_ep0_direction(); + controller.ep0_data_phase = false; + } + + list_del_init(&ci_req->queue); + ci_ep->req_primed = false; switch (SETUP(r.bRequestType, r.bRequest)) { case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE): @@ -480,6 +765,9 @@ static void stop_activity(void) struct ept_queue_head *head; struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; writel(readl(&udc->epcomp), &udc->epcomp); +#ifdef CONFIG_CI_UDC_HAS_HOSTPC + writel(readl(&udc->epsetupstat), &udc->epsetupstat); +#endif writel(readl(&udc->epstat), &udc->epstat); writel(0xffffffff, &udc->epflush); @@ -521,7 +809,11 @@ void udc_irq(void) int max = 64; int speed = USB_SPEED_FULL; +#ifdef CONFIG_CI_UDC_HAS_HOSTPC + bit = (readl(&udc->hostpc1_devlc) >> 25) & 3; +#else bit = (readl(&udc->portsc) >> 26) & 3; +#endif DBG("-- portchange %x %s\n", bit, (bit == 2) ? 
"High" : "Full"); if (bit == 2) { speed = USB_SPEED_HIGH; @@ -538,7 +830,11 @@ void udc_irq(void) printf("\n", readl(&udc->epcomp)); if ((n & STS_UI) || (n & STS_UEI)) { +#ifdef CONFIG_CI_UDC_HAS_HOSTPC + n = readl(&udc->epsetupstat); +#else n = readl(&udc->epstat); +#endif if (n & EPT_RX(0)) handle_setup(); @@ -560,7 +856,7 @@ void udc_irq(void) } } -int usb_gadget_handle_interrupts(void) +int usb_gadget_handle_interrupts(int index) { u32 value; struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; @@ -572,6 +868,17 @@ int usb_gadget_handle_interrupts(void) return value; } +void udc_disconnect(void) +{ + struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; + /* disable pullup */ + stop_activity(); + writel(USBCMD_FS2, &udc->usbcmd); + udelay(800); + if (controller.driver) + controller.driver->disconnect(&controller.gadget); +} + static int ci_pullup(struct usb_gadget *gadget, int is_on) { struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; @@ -580,41 +887,30 @@ static int ci_pullup(struct usb_gadget *gadget, int is_on) writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd); udelay(200); - writel((unsigned)controller.epts, &udc->epinitaddr); + writel((unsigned long)controller.epts, &udc->epinitaddr); /* select DEVICE mode */ writel(USBMODE_DEVICE, &udc->usbmode); +#if !defined(CONFIG_USB_GADGET_DUALSPEED) + /* Port force Full-Speed Connect */ + setbits_le32(&udc->portsc, PFSC); +#endif + writel(0xffffffff, &udc->epflush); /* Turn on the USB connection by enabling the pullup resistor */ writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd); } else { - stop_activity(); - writel(USBCMD_FS2, &udc->usbcmd); - udelay(800); - if (controller.driver) - controller.driver->disconnect(gadget); + udc_disconnect(); } return 0; } -void udc_disconnect(void) -{ - struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; - /* disable pullup */ - stop_activity(); - writel(USBCMD_FS2, &udc->usbcmd); - udelay(800); - if (controller.driver) - controller.driver->disconnect(&controller.gadget); -} - static int ci_udc_probe(void) { struct ept_queue_head *head; - uint8_t *imem; int i; const int num = 2 * NUM_ENDPOINTS; @@ -624,29 +920,18 @@ static int ci_udc_probe(void) const int eplist_raw_sz = num * sizeof(struct ept_queue_head); const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN); - const int ilist_align = roundup(ARCH_DMA_MINALIGN, 32); - const int ilist_ent_raw_sz = 2 * sizeof(struct ept_queue_item); - const int ilist_ent_sz = roundup(ilist_ent_raw_sz, ARCH_DMA_MINALIGN); - const int ilist_sz = NUM_ENDPOINTS * ilist_ent_sz; - /* The QH list must be aligned to 4096 bytes. */ controller.epts = memalign(eplist_align, eplist_sz); if (!controller.epts) return -ENOMEM; memset(controller.epts, 0, eplist_sz); - /* - * Each qTD item must be 32-byte aligned, each qTD touple must be - * cacheline aligned. There are two qTD items for each endpoint and - * only one of them is used for the endpoint at time, so we can group - * them together. 
- */ - controller.items_mem = memalign(ilist_align, ilist_sz); + controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ); if (!controller.items_mem) { free(controller.epts); return -ENOMEM; } - memset(controller.items_mem, 0, ilist_sz); + memset(controller.items_mem, 0, ILIST_SZ); for (i = 0; i < 2 * NUM_ENDPOINTS; i++) { /* @@ -666,15 +951,9 @@ static int ci_udc_probe(void) head->next = TERMINATE; head->info = 0; - imem = controller.items_mem + ((i >> 1) * ilist_ent_sz); - if (i & 1) - imem += sizeof(struct ept_queue_item); - - controller.items[i] = (struct ept_queue_item *)imem; - if (i & 1) { - ci_flush_qh(i - 1); - ci_flush_qtd(i - 1); + ci_flush_qh(i / 2); + ci_flush_qtd(i / 2); } } @@ -682,24 +961,44 @@ static int ci_udc_probe(void) /* Init EP 0 */ memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init)); - controller.ep[0].desc = &ep0_in_desc; + controller.ep[0].desc = &ep0_desc; + INIT_LIST_HEAD(&controller.ep[0].queue); + controller.ep[0].req_primed = false; controller.gadget.ep0 = &controller.ep[0].ep; INIT_LIST_HEAD(&controller.gadget.ep0->ep_list); - /* Init EP 1..n */ - for (i = 1; i < NUM_ENDPOINTS; i++) { - memcpy(&controller.ep[i].ep, &ci_ep_init[1], + /* Init EP 1..3 */ + for (i = 1; i < 4; i++) { + memcpy(&controller.ep[i].ep, &ci_ep_init[i], + sizeof(*ci_ep_init)); + INIT_LIST_HEAD(&controller.ep[i].queue); + controller.ep[i].req_primed = false; + list_add_tail(&controller.ep[i].ep.ep_list, + &controller.gadget.ep_list); + } + + /* Init EP 4..n */ + for (i = 4; i < NUM_ENDPOINTS; i++) { + memcpy(&controller.ep[i].ep, &ci_ep_init[4], sizeof(*ci_ep_init)); + INIT_LIST_HEAD(&controller.ep[i].queue); + controller.ep[i].req_primed = false; list_add_tail(&controller.ep[i].ep.ep_list, &controller.gadget.ep_list); } + ci_ep_alloc_request(&controller.ep[0].ep, 0); + if (!controller.ep0_req) { + free(controller.items_mem); + free(controller.epts); + return -ENOMEM; + } + return 0; } int usb_gadget_register_driver(struct usb_gadget_driver *driver) { - struct ci_udc *udc; int ret; if (!driver) @@ -709,16 +1008,18 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver) if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) return -EINVAL; +#ifdef CONFIG_DM_USB + ret = usb_setup_ehci_gadget(&controller.ctrl); +#else ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); +#endif if (ret) return ret; ret = ci_udc_probe(); - if (!ret) { - udc = (struct ci_udc *)controller.ctrl->hcor; - - /* select ULPI phy */ - writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); + if (ret) { + DBG("udc probe failed, returned %d\n", ret); + return ret; } ret = driver->bind(&controller.gadget); @@ -733,5 +1034,21 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver) int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { + udc_disconnect(); + + driver->unbind(&controller.gadget); + controller.driver = NULL; + + ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); + free(controller.items_mem); + free(controller.epts); + return 0; } + +bool dfu_usb_get_reset(void) +{ + struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; + + return !!(readl(&udc->usbsts) & STS_URI); +}
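
Note on the ILIST_* sizing used above: the macros introduced near the top of the patch align each transfer descriptor so it satisfies both the controller's 32-byte alignment requirement and the cache-maintenance alignment (ARCH_DMA_MINALIGN), and ci_get_qtd() then indexes items_mem by (ep_num * 2) + dir_in. The sketch below only reproduces that arithmetic so it can be checked in isolation; it is not driver code. ARCH_DMA_MINALIGN = 64, NUM_ENDPOINTS = 8 and the 32-byte raw descriptor size are example values only (all three are SoC/board dependent), and roundup() is redefined locally so the snippet stands alone.

/*
 * Illustrative sketch (not part of the patch): the ILIST_* sizing used by
 * ci_udc_probe() and the ci_get_qtd() indexing. ARCH_DMA_MINALIGN,
 * NUM_ENDPOINTS and QTD_RAW_SZ are example values, not the real ones.
 */
#include <stdio.h>

#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

#define ARCH_DMA_MINALIGN	64	/* example: 64-byte cache lines */
#define NUM_ENDPOINTS		8	/* example endpoint count */
#define QTD_RAW_SZ		32	/* example sizeof(struct ept_queue_item) */

/* Align each qTD to both the cache line and the 32 bytes the HW needs */
#define ILIST_ALIGN	roundup(ARCH_DMA_MINALIGN, 32)
#define ILIST_ENT_SZ	roundup(QTD_RAW_SZ, ILIST_ALIGN)
#define ILIST_SZ	(NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)

int main(void)
{
	unsigned int ep_num = 2, dir_in = 1;
	/* Same indexing as ci_get_qtd(): one qTD per (endpoint, direction) */
	unsigned int offset = ((ep_num * 2) + dir_in) * ILIST_ENT_SZ;

	printf("ILIST_ALIGN=%d ILIST_ENT_SZ=%d ILIST_SZ=%d\n",
	       ILIST_ALIGN, ILIST_ENT_SZ, ILIST_SZ);
	printf("qTD for ep%u %s lives at items_mem + %u\n",
	       ep_num, dir_in ? "in" : "out", offset);
	return 0;
}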
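
Note on the dTD chaining in ci_ep_submit_next_request(): the patch splits each request into one dTD per EP_MAX_LENGTH_TRANSFER (16 KiB) and, per the long comment about conditions (a)..(c), appends an extra zero-length packet only for a non-empty IN transfer whose length is an exact multiple of the endpoint's maxpacket and whose request set req->zero. A minimal stand-alone sketch of just that bookkeeping follows; dtds_needed() and needs_zlp() are hypothetical helper names, and the lengths in main() are example values.

/*
 * Illustrative sketch (not part of the patch): how many dTDs a transfer
 * needs and whether a trailing zero-length packet (ZLP) must be queued,
 * mirroring the do/while loop and the IN/!a/!b/!c comment in
 * ci_ep_submit_next_request().
 */
#include <stdbool.h>
#include <stdio.h>

#define EP_MAX_LENGTH_TRANSFER	0x4000	/* 16 KiB per dTD, as in the driver */

/* Number of dTDs the driver's loop walks through for 'len' bytes.
 * A zero-length request still consumes one dTD. */
static unsigned int dtds_needed(unsigned int len)
{
	if (len == 0)
		return 1;
	return (len + EP_MAX_LENGTH_TRANSFER - 1) / EP_MAX_LENGTH_TRANSFER;
}

/* A ZLP is required only for a non-empty IN transfer whose length is an
 * exact multiple of maxpacket and whose request asked for req->zero;
 * otherwise the short (or empty) last packet already tells the host the
 * transaction is complete. */
static bool needs_zlp(bool in, unsigned int len, unsigned int maxpacket,
		      bool req_zero)
{
	return in && len && !(len % maxpacket) && req_zero;
}

int main(void)
{
	/* Example only: a 33 KiB IN bulk transfer on a 512-byte endpoint */
	unsigned int len = 33 * 1024;
	unsigned int maxpacket = 512;

	printf("dTDs: %u, ZLP: %s\n", dtds_needed(len),
	       needs_zlp(true, len, maxpacket, true) ? "yes" : "no");
	return 0;
}

For the 33 KiB example this reports three dTDs plus a ZLP, which should match what the driver's loop (three 16 KiB/16 KiB/1 KiB descriptors) and its other_item zero-length packet path would queue.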
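
Note on the ep0 direction handling: the ep0_desc / ep0_data_phase / flip_ep0_direction() machinery replaces the old fixed ep0_in_desc/ep0_out_desc pair. The Data stage of a control transfer follows the direction bit in bRequestType, the Status stage always runs in the opposite direction, and a Setup with wLength == 0 has no Data stage, so the driver flips straight to the Status direction. A small illustration of that direction bookkeeping follows; struct ep0_dirs and ep0_stage_dirs() are hypothetical helpers invented for the example, and only USB_DIR_IN corresponds to a real constant.

/*
 * Illustrative sketch (not part of the patch): which direction ep0 faces
 * for the Data and Status stages of a control transfer, as tracked by
 * ep0_desc.bEndpointAddress and flip_ep0_direction() in the driver.
 */
#include <stdbool.h>
#include <stdio.h>

#define USB_DIR_IN	0x80	/* device-to-host */

struct ep0_dirs {
	bool has_data_stage;
	unsigned char data_dir;		/* USB_DIR_IN or 0 (OUT) */
	unsigned char status_dir;	/* always opposite the data stage */
};

static struct ep0_dirs ep0_stage_dirs(unsigned char bRequestType,
				      unsigned short wLength)
{
	struct ep0_dirs d;

	d.has_data_stage = wLength != 0;
	d.data_dir = (bRequestType & USB_DIR_IN) ? USB_DIR_IN : 0;
	/* The Status stage runs against the Data stage direction; with no
	 * Data stage the driver flips immediately, so status is IN for an
	 * OUT/no-data request and OUT for an IN request. */
	d.status_dir = (d.data_dir == USB_DIR_IN) ? 0 : USB_DIR_IN;
	return d;
}

int main(void)
{
	/* Examples: SET_ADDRESS (OUT, wLength 0), GET_DESCRIPTOR (IN, 18) */
	struct ep0_dirs set_addr = ep0_stage_dirs(0x00, 0);
	struct ep0_dirs get_desc = ep0_stage_dirs(0x80, 18);

	printf("SET_ADDRESS: data stage: %s, status: %s\n",
	       set_addr.has_data_stage ? "yes" : "no",
	       set_addr.status_dir == USB_DIR_IN ? "IN" : "OUT");
	printf("GET_DESCRIPTOR: data: %s, status: %s\n",
	       get_desc.data_dir == USB_DIR_IN ? "IN" : "OUT",
	       get_desc.status_dir == USB_DIR_IN ? "IN" : "OUT");
	return 0;
}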