/*
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up into segments (the
 * segment size is a multiple of the maxpacket size). For each segment
 * issue a segment request (struct wa_xfer_*), then send the data
 * buffer if out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple; it is a pain to implement. A rough sketch of the
 * per-segment loop follows.
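 *
 * A minimal sketch of that loop (illustration only; the helper names
 * send_segment_request() and send_data() are hypothetical; the real
 * work is spread over __wa_xfer_setup*() and __wa_seg_submit()):
 *
 *	left = urb->transfer_buffer_length;
 *	for (cnt = 0, offset = 0; cnt < xfer->segs; cnt++) {
 *		chunk = min(left, xfer->seg_size);
 *		send_segment_request(xfer, cnt);     [struct wa_xfer_* header]
 *		if (!xfer->is_inbound && chunk > 0)
 *			send_data(xfer, offset, chunk);      [DTO endpoint]
 *		offset += chunk;
 *		left -= chunk;
 *	}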
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 * THIS CODE IS DISGUSTING
 *
 * Warned you are; it's my second try and I'm still not happy with it.
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and then it stays there, assigned, until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means it is unused) or when a transfer ends. Reset the
 *       timer when a xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()]. A sketch of this follows
 *       after (b).
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *       when none are found, go over the list, check their endpoint
 *       and their activity record (if no last-xfer-done-ts in the
 *       last x seconds), take it
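 *
 *   A minimal sketch of (a), assuming a hypothetical
 *   rpipe->idle_timer field and a made-up rpipe_is_idle() helper,
 *   using the classic timer API (none of this is implemented):
 *
 *	static void rpipe_idle_timeout(unsigned long data)
 *	{
 *		struct wa_rpipe *rpipe = (struct wa_rpipe *)data;
 *
 *		if (rpipe_is_idle(rpipe))          [use count down to 1]
 *			rpipe_ep_disable(rpipe->wa, rpipe->ep);
 *	}
 *
 *	[arm at assignment time, rearm whenever a transfer completes]
 *	setup_timer(&rpipe->idle_timer, rpipe_idle_timeout,
 *		    (unsigned long)rpipe);
 *	mod_timer(&rpipe->idle_timer, jiffies + 5 * HZ);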
 *
 *   However, due to the fact that we have a set of limited
 *   resources (max-segments-at-the-same-time per xfer,
 *   xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *   we are going to have to rebuild all this based on a scheduler,
 *   where we have a list of transactions to do and, based on the
 *   availability of the different required components (blocks,
 *   rpipes, segment slots, etc), we schedule them. Painful.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
enum { WA_SEGS_MAX = 128 };

static void wa_xfer_delayed_run(struct wa_rpipe *);
/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
        struct urb urb;
        struct urb *dto_urb;            /* for data output? */
        struct list_head list_node;     /* for rpipe->req_list */
        struct wa_xfer *xfer;           /* out xfer */
        u8 index;                       /* which segment we are */
        enum wa_seg_status status;
        ssize_t result;                 /* bytes xfered or error */
        struct wa_xfer_hdr xfer_hdr;
        u8 xfer_extra[];                /* extra space for xfer_hdr_ctl */
};
static void wa_seg_init(struct wa_seg *seg)
{
        /* usb_init_urb() repeats a lot of work, so we do it here */
        kref_init(&seg->urb.kref);
}
/*
 * Protected by xfer->lock
 */
struct wa_xfer {
        struct kref refcnt;
        struct list_head list_node;
        spinlock_t lock;
        u32 id;

        struct wahc *wa;                /* Wire adapter we are plugged to */
        struct usb_host_endpoint *ep;
        struct urb *urb;                /* URB we are transferring for */
        struct wa_seg **seg;            /* transfer segments */
        u8 segs, segs_submitted, segs_done;
        unsigned is_inbound:1;
        unsigned is_dma:1;
        size_t seg_size;
        int result;

        gfp_t gfp;                      /* allocation mask */

        struct wusb_dev *wusb_dev;      /* for activity timestamps */
};
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
        kref_init(&xfer->refcnt);
        INIT_LIST_HEAD(&xfer->list_node);
        spin_lock_init(&xfer->lock);
}
/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
        if (xfer->seg) {
                unsigned cnt;
                for (cnt = 0; cnt < xfer->segs; cnt++) {
                        /* dto_urb is only allocated for outbound
                         * segments; usb_put_urb() ignores NULL */
                        usb_put_urb(xfer->seg[cnt]->dto_urb);
                        usb_put_urb(&xfer->seg[cnt]->urb);
                }
        }
        kfree(xfer);
}
static void wa_xfer_get(struct wa_xfer *xfer)
{
        kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
        kref_put(&xfer->refcnt, wa_xfer_destroy);
}
/*
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
        /* FIXME: segmentation broken -- kills DWA */
        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
        wa_put(xfer->wa);
        wa_xfer_put(xfer);
}
/*
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
        wusb_dev_put(xfer->wusb_dev);
        rpipe_put(xfer->ep->hcpriv);
        wa_xfer_giveback(xfer);
}
/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
        struct device *dev = &xfer->wa->usb_iface->dev;
        unsigned result, cnt;
        struct wa_seg *seg;
        struct urb *urb = xfer->urb;
        unsigned found_short = 0;

        result = xfer->segs_done == xfer->segs_submitted;
        if (result == 0)
                goto out;
        urb->actual_length = 0;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_DONE:
                        if (found_short && seg->result > 0) {
                                dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
                                        xfer, cnt, seg->result);
                                urb->status = -EINVAL;
                                goto out;
                        }
                        urb->actual_length += seg->result;
                        if (seg->result < xfer->seg_size
                            && cnt != xfer->segs-1)
                                found_short = 1;
                        dev_dbg(dev, "xfer %p#%u: DONE short %d "
                                "result %zu urb->actual_length %d\n",
                                xfer, seg->index, found_short, seg->result,
                                urb->actual_length);
                        break;
                case WA_SEG_ERROR:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
                                xfer, seg->index, seg->result);
                        goto out;
                case WA_SEG_ABORTED:
                        dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
                                xfer, seg->index, urb->status);
                        xfer->result = urb->status;
                        goto out;
                default:
                        dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
                                 xfer, cnt, seg->status);
                        xfer->result = -EINVAL;
                        goto out;
                }
        }
        xfer->result = 0;
out:
        return result;
}
/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we used the pointer or the
 * hash of the pointer, it could repeat over sequential transfers and
 * then it would confuse the HWA... (one wonders why they put a 32
 * bit handle in there then).
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}
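/*
 * Illustration of the pointer-reuse problem (not driver code): the
 * allocator is free to hand back an address it just freed, so two
 * back-to-back transfers can end up with identical pointer-derived
 * IDs:
 *
 *	a = kzalloc(sizeof(*xfer), GFP_KERNEL);
 *	kfree(a);
 *	b = kzalloc(sizeof(*xfer), GFP_KERNEL);	[often b == a]
 *
 * The sequential counter above cannot repeat until it wraps at 2^32.
 */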
/*
 * Return the xfer's ID associated with xfer
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
        return xfer->id;
}
/*
 * Search for a transfer list ID on the HCD's URB list
 *
 * The ID is the sequential counter set up by wa_xfer_id_init(); we
 * just walk wa->xfer_list under the lock looking for a match.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
        unsigned long flags;
        struct wa_xfer *xfer_itr;
        struct wa_xfer *xfer = NULL;

        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
                if (id == xfer_itr->id) {
                        wa_xfer_get(xfer_itr);
                        xfer = xfer_itr;
                        break;
                }
        }
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
        return xfer;
}
struct wa_xfer_abort_buffer {
        struct urb urb;
        struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
        struct wa_xfer_abort_buffer *b = urb->context;
        usb_put_urb(&b->urb);
}
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        struct wa_xfer_abort_buffer *b;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        b = kmalloc(sizeof(*b), GFP_ATOMIC);
        if (b == NULL)
                return;
        b->cmd.bLength = sizeof(b->cmd);
        b->cmd.bRequestType = WA_XFER_ABORT;
        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
        b->cmd.dwTransferID = cpu_to_le32(wa_xfer_id(xfer));

        usb_init_urb(&b->urb);
        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
                usb_sndbulkpipe(xfer->wa->usb_dev,
                                xfer->wa->dto_epd->bEndpointAddress),
                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
        if (result >= 0)
                return;                 /* callback frees! */

        if (printk_ratelimit())
                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
                        xfer, result);
        kfree(b);
}
/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
                                     enum wa_xfer_type *pxfer_type)
{
        ssize_t result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        size_t maxpktsize;
        struct urb *urb = xfer->urb;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        switch (rpipe->descr.bmAttribute & 0x3) {
        case USB_ENDPOINT_XFER_CONTROL:
                *pxfer_type = WA_XFER_TYPE_CTL;
                result = sizeof(struct wa_xfer_ctl);
                break;
        case USB_ENDPOINT_XFER_INT:
        case USB_ENDPOINT_XFER_BULK:
                *pxfer_type = WA_XFER_TYPE_BI;
                result = sizeof(struct wa_xfer_bi);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                dev_err(dev, "FIXME: ISOC not implemented\n");
                result = -ENOSYS;
                goto error;
        default:
                /* never happens */
                BUG();
                result = -EINVAL;       /* shut gcc up */
        }
        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
        /* Compute the segment size and make sure it is a multiple of
         * the maxpktsize (WUSB1.0[8.3.3.1]); not really too much of
         * a requirement, but it keeps the math simple. */
        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
        if (xfer->seg_size < maxpktsize) {
                dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
                        "%zu\n", xfer->seg_size, maxpktsize);
                result = -EINVAL;
                goto error;
        }
        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
        xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
                / xfer->seg_size;
        if (xfer->segs > WA_SEGS_MAX) {
                dev_err(dev, "BUG? oops, number of segments %u bigger than %d\n",
                        xfer->segs, WA_SEGS_MAX);
                result = -EINVAL;
                goto error;
        }
        if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
                xfer->segs = 1;
error:
        return result;
}
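/*
 * Worked example with made-up descriptor values: wBlocks = 4 and
 * bRPipeBlockSize = 7 give a block size of 1 << (7 - 1) = 64 bytes,
 * so seg_size starts at 4 * 64 = 256. A wMaxPacketSize of 192 rounds
 * that down to (256 / 192) * 192 = 192, and a 1000 byte buffer then
 * needs (1000 + 192 - 1) / 192 = 6 segments.
 */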
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
                                 struct wa_xfer_hdr *xfer_hdr0,
                                 enum wa_xfer_type xfer_type,
                                 size_t xfer_hdr_size)
{
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        xfer_hdr0->bLength = xfer_hdr_size;
        xfer_hdr0->bRequestType = xfer_type;
        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
        xfer_hdr0->dwTransferID = cpu_to_le32(wa_xfer_id(xfer));
        xfer_hdr0->bTransferSegment = 0;
        switch (xfer_type) {
        case WA_XFER_TYPE_CTL: {
                struct wa_xfer_ctl *xfer_ctl =
                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
                       sizeof(xfer_ctl->baSetupData));
                break;
        }
        case WA_XFER_TYPE_BI:
                break;
        case WA_XFER_TYPE_ISO:
                printk(KERN_ERR "FIXME: ISOC not implemented\n");
        default:
                BUG();
        }
}
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
                        xfer, seg->index, urb->actual_length);
                if (seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                seg->result = urb->actual_length;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by whoever unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data out error %d\n",
                        xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                if (seg->status != WA_SEG_ERROR) {
                        seg->status = WA_SEG_ERROR;
                        seg->result = urb->status;
                        xfer->segs_done++;
                        __wa_xfer_abort(xfer);
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        done = __wa_xfer_is_done(xfer);
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}
/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access the xfer until we are sure that the transfer
 * hasn't been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * completes.
 */
static void wa_seg_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by whoever unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: request error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                usb_unlink_urb(seg->dto_urb);
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}
/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
        int result, cnt;
        size_t alloc_size = sizeof(*xfer->seg[0])
                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
        struct usb_device *usb_dev = xfer->wa->usb_dev;
        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
        struct wa_seg *seg;
        size_t buf_itr, buf_size, buf_itr_size;

        result = -ENOMEM;
        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
        if (xfer->seg == NULL)
                goto error_segs_kzalloc;
        buf_itr = 0;
        buf_size = xfer->urb->transfer_buffer_length;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
                if (seg == NULL)
                        goto error_seg_kzalloc;
                wa_seg_init(seg);
                seg->xfer = xfer;
                seg->index = cnt;
                usb_fill_bulk_urb(&seg->urb, usb_dev,
                                  usb_sndbulkpipe(usb_dev,
                                                  dto_epd->bEndpointAddress),
                                  &seg->xfer_hdr, xfer_hdr_size,
                                  wa_seg_cb, seg);
                buf_itr_size = buf_size > xfer->seg_size ?
                        xfer->seg_size : buf_size;
                if (xfer->is_inbound == 0 && buf_size > 0) {
                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->dto_urb == NULL)
                                goto error_dto_alloc;
                        usb_fill_bulk_urb(
                                seg->dto_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                                dto_epd->bEndpointAddress),
                                NULL, 0, wa_seg_dto_cb, seg);
                        if (xfer->is_dma) {
                                seg->dto_urb->transfer_dma =
                                        xfer->urb->transfer_dma + buf_itr;
                                seg->dto_urb->transfer_flags |=
                                        URB_NO_TRANSFER_DMA_MAP;
                        } else
                                seg->dto_urb->transfer_buffer =
                                        xfer->urb->transfer_buffer + buf_itr;
                        seg->dto_urb->transfer_buffer_length = buf_itr_size;
                }
                seg->status = WA_SEG_READY;
                buf_itr += buf_itr_size;
                buf_size -= buf_itr_size;
        }
        return 0;

error_dto_alloc:
        kfree(xfer->seg[cnt]);
error_seg_kzalloc:
        /*
         * Use the fact that cnt is left at where it failed: seg[cnt]
         * was either freed above or never allocated, so only the
         * fully set up segments below it need unwinding.
         */
        while (cnt-- > 0) {
                usb_put_urb(xfer->seg[cnt]->dto_urb);   /* NULL-safe */
                usb_put_urb(&xfer->seg[cnt]->urb);      /* frees the seg */
        }
        kfree(xfer->seg);
        xfer->seg = NULL;
error_segs_kzalloc:
        return result;
}
/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 * need to do two for loops when we could run everything in a
 * single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
        size_t xfer_hdr_size, cnt, transfer_size;
        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
        if (result < 0)
                goto error_setup_sizes;
        xfer_hdr_size = result;
        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
        if (result < 0) {
                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
                        xfer, xfer->segs, result);
                goto error_setup_segs;
        }
        /* Fill the first header */
        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        wa_xfer_id_init(xfer);
        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

        /* Fill remaining headers */
        xfer_hdr = xfer_hdr0;
        transfer_size = urb->transfer_buffer_length;
        xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
                cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
        transfer_size -= xfer->seg_size;
        for (cnt = 1; cnt < xfer->segs; cnt++) {
                xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
                memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                xfer_hdr->bTransferSegment = cnt;
                xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
                        cpu_to_le32(xfer->seg_size)
                        : cpu_to_le32(transfer_size);
                xfer->seg[cnt]->status = WA_SEG_READY;
                transfer_size -= xfer->seg_size;
        }
        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
        result = 0;
error_setup_segs:
error_setup_sizes:
        return result;
}
/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
                           struct wa_seg *seg)
{
        int result;

        result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
        if (result < 0) {
                printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
                       xfer, seg->index, result);
                goto error_seg_submit;
        }
        if (seg->dto_urb) {
                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                if (result < 0) {
                        printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
                               xfer, seg->index, result);
                        goto error_dto_submit;
                }
        }
        seg->status = WA_SEG_SUBMITTED;
        rpipe_avail_dec(rpipe);
        return 0;

error_dto_submit:
        usb_unlink_urb(&seg->urb);
error_seg_submit:
        seg->status = WA_SEG_ERROR;
        seg->result = result;
        return result;
}
/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
        int result;
        struct device *dev = &rpipe->wa->usb_iface->dev;
        struct wa_seg *seg;
        struct wa_xfer *xfer;
        unsigned long flags;

        spin_lock_irqsave(&rpipe->seg_lock, flags);
        while (atomic_read(&rpipe->segs_available) > 0
               && !list_empty(&rpipe->seg_list)) {
                seg = list_entry(rpipe->seg_list.next, struct wa_seg,
                                 list_node);
                list_del(&seg->list_node);
                xfer = seg->xfer;
                result = __wa_seg_submit(rpipe, xfer, seg);
                dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
                        xfer, seg->index, atomic_read(&rpipe->segs_available), result);
                if (unlikely(result < 0)) {
                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
                        spin_lock_irqsave(&xfer->lock, flags);
                        __wa_xfer_abort(xfer);
                        xfer->segs_done++;
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        spin_lock_irqsave(&rpipe->seg_lock, flags);
                }
        }
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}
/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
        int result;
        struct wahc *wa = xfer->wa;
        struct device *dev = &wa->usb_iface->dev;
        unsigned cnt;
        struct wa_seg *seg;
        unsigned long flags;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
        u8 available;
        u8 empty;

        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_add_tail(&xfer->list_node, &wa->xfer_list);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
        result = 0;
        spin_lock_irqsave(&rpipe->seg_lock, flags);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                available = atomic_read(&rpipe->segs_available);
                empty = list_empty(&rpipe->seg_list);
                seg = xfer->seg[cnt];
                dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
                        xfer, cnt, available, empty,
                        available == 0 || !empty ? "delayed" : "submitted");
                if (available == 0 || !empty) {
                        dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
                        seg->status = WA_SEG_DELAYED;
                        list_add_tail(&seg->list_node, &rpipe->seg_list);
                } else {
                        result = __wa_seg_submit(rpipe, xfer, seg);
                        if (result < 0) {
                                __wa_xfer_abort(xfer);
                                goto error_seg_submit;
                        }
                }
                xfer->segs_submitted++;
        }
error_seg_submit:
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
        return result;
}
/*
 * Second part of a URB/transfer enqueueing
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled, with its rpipe refcounted
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
        int result;
        unsigned long flags;
        unsigned done;
        struct urb *urb = xfer->urb;
        struct wahc *wa = xfer->wa;
        struct wusbhc *wusbhc = wa->wusb;
        struct wusb_dev *wusb_dev;

        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
        if (result < 0)
                goto error_rpipe_get;
        result = -ENODEV;
        /* FIXME: segmentation broken -- kills DWA */
        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
        if (urb->dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
        if (wusb_dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        mutex_unlock(&wusbhc->mutex);

        spin_lock_irqsave(&xfer->lock, flags);
        xfer->wusb_dev = wusb_dev;
        result = urb->status;
        if (urb->status != -EINPROGRESS)
                goto error_dequeued;

        result = __wa_xfer_setup(xfer, urb);
        if (result < 0)
                goto error_xfer_setup;
        result = __wa_xfer_submit(xfer);
        if (result < 0)
                goto error_xfer_submit;
        spin_unlock_irqrestore(&xfer->lock, flags);
        return;

        /*
         * This is basically wa_xfer_completion() broken up. wa_xfer_giveback()
         * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
         * up the rest.
         */
error_xfer_setup:
error_dequeued:
        spin_unlock_irqrestore(&xfer->lock, flags);
        /* FIXME: segmentation broken, kills DWA */
        wusb_dev_put(wusb_dev);
error_dev_gone:
        rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
        xfer->result = result;
        wa_xfer_giveback(xfer);
        return;

error_xfer_submit:
        done = __wa_xfer_is_done(xfer);
        xfer->result = result;
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
}
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
        struct wahc *wa = container_of(ws, struct wahc, xfer_work);
        struct wa_xfer *xfer, *next;
        struct urb *urb;

        spin_lock_irq(&wa->xfer_list_lock);
        list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
                                 list_node) {
                list_del_init(&xfer->list_node);
                spin_unlock_irq(&wa->xfer_list_lock);

                urb = xfer->urb;
                wa_urb_enqueue_b(xfer);
                usb_put_urb(urb);       /* taken when queuing */

                spin_lock_irq(&wa->xfer_list_lock);
        }
        spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps [see
 * wa_urb_enqueue_b(), for the rpipe_get() and the mutex_lock()]. If
 * we are in an atomic section, we defer the wa_urb_enqueue_b() call
 * to a workqueue; otherwise we call it directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       drop it on the error paths here.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
                   struct urb *urb, gfp_t gfp)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer *xfer;
        unsigned long my_flags;
        unsigned cant_sleep = irqs_disabled() | in_atomic();

        if (urb->transfer_buffer == NULL
            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
            && urb->transfer_buffer_length != 0) {
                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
        }

        result = -ENOMEM;
        xfer = kzalloc(sizeof(*xfer), gfp);
        if (xfer == NULL)
                goto error_kmalloc;

        result = -ENOENT;
        if (urb->status != -EINPROGRESS)        /* cancelled */
                goto error_dequeued;            /* before starting? */
        wa_xfer_init(xfer);
        xfer->wa = wa_get(wa);
        xfer->urb = urb;
        xfer->gfp = gfp;
        xfer->ep = ep;
        urb->hcpriv = xfer;

        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
                xfer, urb, urb->pipe, urb->transfer_buffer_length,
                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
                cant_sleep ? "deferred" : "inline");

        if (cant_sleep) {
                usb_get_urb(urb);
                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
                queue_work(wusbd, &wa->xfer_work);
        } else {
                wa_urb_enqueue_b(xfer);
        }
        return 0;

error_dequeued:
        kfree(xfer);
error_kmalloc:
        return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
/*
 * Dequeue a URB and make sure wusbhc_giveback_urb() [the completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 * not setup:  If there is no hcpriv yet, that means that the enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
        unsigned long flags, flags2;
        struct wa_xfer *xfer;
        struct wa_seg *seg;
        struct wa_rpipe *rpipe;
        unsigned cnt;
        unsigned rpipe_ready = 0;

        xfer = urb->hcpriv;
        if (xfer == NULL) {
                /*
                 * Nothing setup yet; enqueue will see urb->status !=
                 * -EINPROGRESS (set by the hcd layer) and bail out with
                 * an error, no need to do completion
                 */
                BUG_ON(urb->status == -EINPROGRESS);
                goto out;
        }
        spin_lock_irqsave(&xfer->lock, flags);
        rpipe = xfer->ep->hcpriv;
        /* Check the delayed list -> if there, release and complete */
        spin_lock_irqsave(&wa->xfer_list_lock, flags2);
        if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
                goto dequeue_delayed;
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
        if (xfer->seg == NULL)          /* still hasn't reached */
                goto out_unlock;        /* setup(), enqueue_b() completes */
        /* Ok, the xfer is in flight already, it's been setup and submitted.*/
        __wa_xfer_abort(xfer);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_NOTREADY:
                case WA_SEG_READY:
                        printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
                               xfer, cnt, seg->status);
                        WARN_ON(1);
                        break;
                case WA_SEG_DELAYED:
                        seg->status = WA_SEG_ABORTED;
                        spin_lock_irqsave(&rpipe->seg_lock, flags2);
                        list_del(&seg->list_node);
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
                        break;
                case WA_SEG_SUBMITTED:
                        seg->status = WA_SEG_ABORTED;
                        usb_unlink_urb(&seg->urb);
                        if (xfer->is_inbound == 0)
                                usb_unlink_urb(seg->dto_urb);
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_PENDING:
                        seg->status = WA_SEG_ABORTED;
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_DTI_PENDING:
                        usb_unlink_urb(wa->dti_urb);
                        seg->status = WA_SEG_ABORTED;
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_DONE:
                case WA_SEG_ERROR:
                case WA_SEG_ABORTED:
                        break;
                }
        }
        xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
        __wa_xfer_is_done(xfer);
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return 0;

out_unlock:
        spin_unlock_irqrestore(&xfer->lock, flags);
out:
        return 0;

dequeue_delayed:
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
        xfer->result = urb->status;
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_giveback(xfer);
        usb_put_urb(urb);       /* we got a ref in enqueue() */
        return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * values
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
        int errno;
        u8 real_status = status;
        static int xlat[] = {
                [WA_XFER_STATUS_SUCCESS] =              0,
                [WA_XFER_STATUS_HALTED] =               -EPIPE,
                [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
                [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
                [WA_XFER_RESERVED] =                    EINVAL,
                [WA_XFER_STATUS_NOT_FOUND] =            0,
                [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
                [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
                [WA_XFER_STATUS_ABORTED] =              -EINTR,
                [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
                [WA_XFER_INVALID_FORMAT] =              EINVAL,
                [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
                [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
        };
        status &= 0x3f;

        if (status >= ARRAY_SIZE(xlat)) {
                printk_ratelimited(KERN_ERR "%s(): BUG? "
                        "Unknown WA transfer status 0x%02x\n",
                        __func__, real_status);
                return -EINVAL;
        }
        errno = xlat[status];
        if (unlikely(errno > 0)) {
                printk_ratelimited(KERN_ERR "%s(): BUG? "
                        "Inconsistent WA status: 0x%02x\n",
                        __func__, real_status);
                errno = -errno;
        }
        return errno;
}
/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up into parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        unsigned long flags;
        u8 seg_idx;
        struct wa_seg *seg;
        struct wa_rpipe *rpipe;
        struct wa_xfer_result *xfer_result = wa->xfer_result;
        u8 done = 0;
        u8 usb_status;
        unsigned rpipe_ready = 0;

        spin_lock_irqsave(&xfer->lock, flags);
        seg_idx = xfer_result->bTransferSegment & 0x7f;
        if (unlikely(seg_idx >= xfer->segs))
                goto error_bad_seg;
        seg = xfer->seg[seg_idx];
        rpipe = xfer->ep->hcpriv;
        usb_status = xfer_result->bTransferStatus;
        dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
                xfer, seg_idx, usb_status, seg->status);
        if (seg->status == WA_SEG_ABORTED
            || seg->status == WA_SEG_ERROR)     /* already handled */
                goto segment_aborted;
        if (seg->status == WA_SEG_SUBMITTED)    /* oops, got here */
                seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
        if (seg->status != WA_SEG_PENDING) {
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
                                xfer, seg_idx, seg->status);
                seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
        }
        if (usb_status & 0x80) {
                seg->result = wa_xfer_status_to_errno(usb_status);
                dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
                        xfer, seg->index, usb_status);
                goto error_complete;
        }
        /* FIXME: we ignore warnings, tally them for stats */
        if (usb_status & 0x40)          /* Warning?... */
                usb_status = 0;         /* ... pass */
        if (xfer->is_inbound) {         /* IN data phase: read to buffer */
                seg->status = WA_SEG_DTI_PENDING;
                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
                if (xfer->is_dma) {
                        wa->buf_in_urb->transfer_dma =
                                xfer->urb->transfer_dma
                                + seg_idx * xfer->seg_size;
                        wa->buf_in_urb->transfer_flags
                                |= URB_NO_TRANSFER_DMA_MAP;
                } else {
                        wa->buf_in_urb->transfer_buffer =
                                xfer->urb->transfer_buffer
                                + seg_idx * xfer->seg_size;
                        wa->buf_in_urb->transfer_flags
                                &= ~URB_NO_TRANSFER_DMA_MAP;
                }
                wa->buf_in_urb->transfer_buffer_length =
                        le32_to_cpu(xfer_result->dwTransferLength);
                wa->buf_in_urb->context = seg;
                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
                if (result < 0)
                        goto error_submit_buf_in;
        } else {
                /* OUT data phase, complete it -- */
                seg->status = WA_SEG_DONE;
                seg->result = le32_to_cpu(xfer_result->dwTransferLength);
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return;

error_submit_buf_in:
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
                        xfer, seg_idx, result);
        seg->result = result;
error_complete:
        seg->status = WA_SEG_ERROR;
        xfer->segs_done++;
        rpipe_ready = rpipe_avail_inc(rpipe);
        __wa_xfer_abort(xfer);
        done = __wa_xfer_is_done(xfer);
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return;

error_bad_seg:
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_urb_dequeue(wa, xfer->urb);
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        return;

segment_aborted:
        /* nothing to do, as the aborter did the completion */
        spin_unlock_irqrestore(&xfer->lock, flags);
}
/*
 * Callback for the IN data phase
 *
 * If successful transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access the xfer until we are sure that the transfer
 * hasn't been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned rpipe_ready;
        unsigned long flags;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
                        xfer, seg->index, (size_t)urb->actual_length);
                seg->status = WA_SEG_DONE;
                seg->result = urb->actual_length;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by whoever unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: data in error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                __wa_xfer_abort(xfer);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
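/*
 * Compactly (the driver tracks this implicitly through wa->dti_urb
 * and the segment states rather than with an explicit state
 * variable):
 *
 *	OFF -> RXR	first transfer notification posts the DTI URB
 *	RXR -> RBI	xfer result for an IN segment posts the BUF-IN URB
 *	RBI -> RXR	BUF-IN completes and the DTI URB is reposted
 *	any -> OFF	ENOENT/ESHUTDOWN or too many errors
 */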
static void wa_xfer_result_cb(struct urb *urb)
{
        int result;
        struct wahc *wa = urb->context;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer_result *xfer_result;
        u32 xfer_id;
        struct wa_xfer *xfer;
        u8 usb_status;

        BUG_ON(wa->dti_urb != urb);
        switch (wa->dti_urb->status) {
        case 0:
                /* We have a xfer result buffer; check it */
                dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
                        urb->actual_length, urb->transfer_buffer);
                if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--bad size "
                                "xfer result (%d bytes vs %zu needed)\n",
                                urb->actual_length, sizeof(*xfer_result));
                        break;
                }
                xfer_result = wa->xfer_result;
                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header length %u\n",
                                xfer_result->hdr.bLength);
                        break;
                }
                if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header type 0x%02x\n",
                                xfer_result->hdr.bNotifyType);
                        break;
                }
                usb_status = xfer_result->bTransferStatus & 0x3f;
                if (usb_status == WA_XFER_STATUS_ABORTED
                    || usb_status == WA_XFER_STATUS_NOT_FOUND)
                        /* taken care of already */
                        break;
                xfer_id = le32_to_cpu(xfer_result->dwTransferID);
                xfer = wa_xfer_get_by_id(wa, xfer_id);
                if (xfer == NULL) {
                        /* FIXME: transaction might have been cancelled */
                        dev_err(dev, "DTI Error: xfer result--"
                                "unknown xfer 0x%08x (status 0x%02x)\n",
                                xfer_id, usb_status);
                        break;
                }
                wa_xfer_result_chew(wa, xfer);
                wa_xfer_put(xfer);
                break;
        case -ENOENT:           /* (we killed the URB)... so, no broadcast */
        case -ESHUTDOWN:        /* going away! */
                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
                goto out;
        default:
                /* Unknown error */
                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTI: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                        goto out;
                }
                if (printk_ratelimit())
                        dev_err(dev, "DTI: URB error %d\n", urb->status);
                break;
        }
        /* Resubmit the DTI URB */
        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                wa_reset_all(wa);
        }
out:
        return;
}
/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available, and we
 * set up to read it here.
 *
 * To speed up things, we always have a URB reading the DTI URB; we
 * don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_notif_xfer *notif_xfer;
        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
                /* FIXME: hardcoded limitation, adapt */
                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
                goto error;
        }
        if (wa->dti_urb != NULL)        /* DTI URB already started */
                goto out;

        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->dti_urb == NULL) {
                dev_err(dev, "Can't allocate DTI URB\n");
                goto error_dti_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->dti_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                wa->xfer_result, wa->xfer_result_size,
                wa_xfer_result_cb, wa);

        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->buf_in_urb == NULL) {
                dev_err(dev, "Can't allocate BUF-IN URB\n");
                goto error_buf_in_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->buf_in_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                NULL, 0, wa_buf_in_cb, wa);
        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                goto error_dti_urb_submit;
        }
out:
        return;

error_dti_urb_submit:
        usb_put_urb(wa->buf_in_urb);
        wa->buf_in_urb = NULL;
error_buf_in_urb_alloc:
        usb_put_urb(wa->dti_urb);
        wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
        wa_reset_all(wa);
}