Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / usb / usbip / stub_rx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2003-2008 Takahiro Hirofuchi
4  */
5
6 #include <asm/byteorder.h>
7 #include <linux/kthread.h>
8 #include <linux/usb.h>
9 #include <linux/usb/hcd.h>
10 #include <linux/scatterlist.h>
11
12 #include "usbip_common.h"
13 #include "stub.h"
14
15 static int is_clear_halt_cmd(struct urb *urb)
16 {
17         struct usb_ctrlrequest *req;
18
19         req = (struct usb_ctrlrequest *) urb->setup_packet;
20
21         return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
22                (req->bRequestType == USB_RECIP_ENDPOINT) &&
23                (req->wValue == USB_ENDPOINT_HALT);
24 }
25
26 static int is_set_interface_cmd(struct urb *urb)
27 {
28         struct usb_ctrlrequest *req;
29
30         req = (struct usb_ctrlrequest *) urb->setup_packet;
31
32         return (req->bRequest == USB_REQ_SET_INTERFACE) &&
33                 (req->bRequestType == USB_RECIP_INTERFACE);
34 }
35
36 static int is_set_configuration_cmd(struct urb *urb)
37 {
38         struct usb_ctrlrequest *req;
39
40         req = (struct usb_ctrlrequest *) urb->setup_packet;
41
42         return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
43                 (req->bRequestType == USB_RECIP_DEVICE);
44 }
45
46 static int is_reset_device_cmd(struct urb *urb)
47 {
48         struct usb_ctrlrequest *req;
49         __u16 value;
50         __u16 index;
51
52         req = (struct usb_ctrlrequest *) urb->setup_packet;
53         value = le16_to_cpu(req->wValue);
54         index = le16_to_cpu(req->wIndex);
55
56         if ((req->bRequest == USB_REQ_SET_FEATURE) &&
57             (req->bRequestType == USB_RT_PORT) &&
58             (value == USB_PORT_FEAT_RESET)) {
59                 usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
60                 return 1;
61         } else
62                 return 0;
63 }
64
65 static int tweak_clear_halt_cmd(struct urb *urb)
66 {
67         struct usb_ctrlrequest *req;
68         int target_endp;
69         int target_dir;
70         int target_pipe;
71         int ret;
72
73         req = (struct usb_ctrlrequest *) urb->setup_packet;
74
75         /*
76          * The stalled endpoint is specified in the wIndex value. The endpoint
77          * of the urb is the target of this clear_halt request (i.e., control
78          * endpoint).
79          */
80         target_endp = le16_to_cpu(req->wIndex) & 0x000f;
81
82         /* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80.  */
83         target_dir = le16_to_cpu(req->wIndex) & 0x0080;
84
85         if (target_dir)
86                 target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
87         else
88                 target_pipe = usb_sndctrlpipe(urb->dev, target_endp);
89
90         ret = usb_clear_halt(urb->dev, target_pipe);
91         if (ret < 0)
92                 dev_err(&urb->dev->dev,
93                         "usb_clear_halt error: devnum %d endp %d ret %d\n",
94                         urb->dev->devnum, target_endp, ret);
95         else
96                 dev_info(&urb->dev->dev,
97                          "usb_clear_halt done: devnum %d endp %d\n",
98                          urb->dev->devnum, target_endp);
99
100         return ret;
101 }
102
103 static int tweak_set_interface_cmd(struct urb *urb)
104 {
105         struct usb_ctrlrequest *req;
106         __u16 alternate;
107         __u16 interface;
108         int ret;
109
110         req = (struct usb_ctrlrequest *) urb->setup_packet;
111         alternate = le16_to_cpu(req->wValue);
112         interface = le16_to_cpu(req->wIndex);
113
114         usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
115                           interface, alternate);
116
117         ret = usb_set_interface(urb->dev, interface, alternate);
118         if (ret < 0)
119                 dev_err(&urb->dev->dev,
120                         "usb_set_interface error: inf %u alt %u ret %d\n",
121                         interface, alternate, ret);
122         else
123                 dev_info(&urb->dev->dev,
124                         "usb_set_interface done: inf %u alt %u\n",
125                         interface, alternate);
126
127         return ret;
128 }
129
130 static int tweak_set_configuration_cmd(struct urb *urb)
131 {
132         struct stub_priv *priv = (struct stub_priv *) urb->context;
133         struct stub_device *sdev = priv->sdev;
134         struct usb_ctrlrequest *req;
135         __u16 config;
136         int err;
137
138         req = (struct usb_ctrlrequest *) urb->setup_packet;
139         config = le16_to_cpu(req->wValue);
140
141         err = usb_set_configuration(sdev->udev, config);
142         if (err && err != -ENODEV)
143                 dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
144                         config, err);
145         return 0;
146 }
147
/*
 * Execute a hub-port reset request by resetting the exported device
 * itself.  Always returns 0, even when the reset could not be performed.
 */
static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	/*
	 * The device lock must be held across usb_reset_device(); if it
	 * cannot be obtained, skip the reset and report success anyway.
	 */
	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}
164
/*
 * clear_halt, set_interface, and set_configuration require special tricks.
 * These requests change device state the server's USB core tracks, so they
 * are executed locally (via the tweak_* helpers above) rather than only
 * being forwarded.  NOTE(review): the caller still submits the tweaked URB
 * afterward (see the "no need to submit an intercepted request" comment in
 * stub_recv_cmd_submit) — confirm the duplicate execution is harmless.
 */
static void tweak_special_requests(struct urb *urb)
{
	/* only control URBs carry a setup packet worth inspecting */
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	/* the is_* predicates are mutually exclusive; first match wins */
	if (is_clear_halt_cmd(urb))
		/* tweak clear_halt */
		 tweak_clear_halt_cmd(urb);

	else if (is_set_interface_cmd(urb))
		/* tweak set_interface */
		tweak_set_interface_cmd(urb);

	else if (is_set_configuration_cmd(urb))
		/* tweak set_configuration */
		tweak_set_configuration_cmd(urb);

	else if (is_reset_device_cmd(urb))
		tweak_reset_device_cmd(urb);
	else
		usbip_dbg_stub_rx("no need to tweak\n");
}
193
/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process coming urbs.  Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * Always returns 0: whether or not the target URB was found, a result
 * for the unlink request will eventually be sent to the client.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret, i;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* search priv_init for a still-in-flight submission with this seqnum */
	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		/*
		 * This matched urb is not completed yet (i.e., be in
		 * flight in usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * In the case that unlinking flag is on, prev->seqnum
		 * is changed from the seqnum of the cancelling urb to
		 * the seqnum of the unlink request. This will be used
		 * to make the result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is now out of spinlocking to avoid
		 * spinlock recursion since stub_complete() is
		 * sometimes called in this context but not in the
		 * interrupt context.  If stub_complete() is executed
		 * before we call usb_unlink_urb(), usb_unlink_urb()
		 * will return an error value. In this case, stub_tx
		 * will return the result pdu of this unlink request
		 * though submission is completed and actual unlinking
		 * is not executed. OK?
		 */
		/* In the above case, urb->status is not -ECONNRESET,
		 * so a driver in a client host will know the failure
		 * of the unlink request ?
		 */
		/*
		 * A single request may have been split into several URBs
		 * (see stub_recv_cmd_submit); unlink only those that have
		 * not completed yet.
		 */
		for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
			ret = usb_unlink_urb(priv->urbs[i]);
			if (ret != -EINPROGRESS)
				dev_err(&priv->urbs[i]->dev->dev,
					"failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
					i + 1, priv->num_urbs,
					priv->seqnum, ret);
		}
		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in priv_init queue. It was
	 * already completed and its results is/was going to be sent by a
	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
	 * return the completeness of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
276
277 static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
278 {
279         struct usbip_device *ud = &sdev->ud;
280         int valid = 0;
281
282         if (pdu->base.devid == sdev->devid) {
283                 spin_lock_irq(&ud->lock);
284                 if (ud->status == SDEV_ST_USED) {
285                         /* A request is valid. */
286                         valid = 1;
287                 }
288                 spin_unlock_irq(&ud->lock);
289         }
290
291         return valid;
292 }
293
/*
 * Allocate a stub_priv for an incoming CMD_SUBMIT and link it to
 * sdev->priv_init so the error handler can find and free it.
 *
 * Returns the new stub_priv, or NULL on allocation failure (in which
 * case an SDEV_EVENT_ERROR_MALLOC event has been queued).
 */
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* GFP_ATOMIC: we are holding priv_lock with interrupts disabled */
	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->udev->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}
324
/*
 * Translate the endpoint number/direction in a CMD_SUBMIT header into a
 * usb pipe value for the exported device.
 *
 * Returns the pipe, or -1 when the endpoint is out of range, does not
 * exist on the device, has an unrecognized transfer type, or (for
 * isochronous endpoints) the pdu's packet count is invalid.  The caller
 * drops the request on -1.
 */
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;
	int epnum = pdu->base.ep;
	int dir = pdu->base.direction;

	/* epnum comes from the (untrusted) client; bound-check it first */
	if (epnum < 0 || epnum > 15)
		goto err_ret;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep)
		goto err_ret;

	epd = &ep->desc;

	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		/* validate number of packets */
		if (pdu->u.cmd_submit.number_of_packets < 0 ||
		    pdu->u.cmd_submit.number_of_packets >
		    USBIP_MAX_ISO_PACKETS) {
			dev_err(&sdev->udev->dev,
				"CMD_SUBMIT: isoc invalid num packets %d\n",
				pdu->u.cmd_submit.number_of_packets);
			return -1;
		}
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

err_ret:
	/* invalid endpoint number, missing endpoint, or unknown xfer type */
	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
	return -1;
}
387
/*
 * Sanitize urb->transfer_flags, which arrive from the (untrusted) client,
 * by keeping only flags permitted for this endpoint's transfer type and
 * direction.  NOTE(review): this appears modeled on the flag policy in
 * the USB core's usb_submit_urb() — verify against the core when updating.
 */
static void masking_bogus_flags(struct urb *urb)
{
	int                             xfertype;
	struct usb_device               *dev;
	struct usb_host_endpoint        *ep;
	int                             is_out;
	unsigned int    allowed;

	/* bail out on URBs the core would not accept anyway */
	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		/*
		 * For control transfers the data-stage direction comes from
		 * the setup packet; a zero-length data stage counts as OUT.
		 */
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	default:                        /* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	/* strip everything the policy above did not explicitly allow */
	urb->transfer_flags &= allowed;
}
438
439 static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
440 {
441         int ret;
442         int i;
443
444         for (i = 0; i < priv->num_urbs; i++) {
445                 ret = usbip_recv_xbuff(ud, priv->urbs[i]);
446                 if (ret < 0)
447                         break;
448         }
449
450         return ret;
451 }
452
/*
 * Handle a CMD_SUBMIT pdu: allocate buffers and URB(s), receive the
 * outgoing payload from the client, and submit the URB(s) to the device.
 * Errors are reported by queueing a usbip event; nothing is returned.
 */
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	struct scatterlist *sgl = NULL, *sg;
	void *buffer = NULL;
	unsigned long long buf_len;
	int nents;
	int num_urbs = 1;
	int pipe = get_pipe(sdev, pdu);
	int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
	int support_sg = 1;
	int np = 0;
	int ret, i;

	/* get_pipe() already logged the reason; silently drop the request */
	if (pipe == -1)
		return;

	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;

	/* allocate urb transfer buffer, if needed */
	if (buf_len) {
		if (use_sg) {
			sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
			if (!sgl)
				goto err_malloc;
		} else {
			buffer = kzalloc(buf_len, GFP_KERNEL);
			if (!buffer)
				goto err_malloc;
		}
	}

	/* Check if the server's HCD supports SG */
	if (use_sg && !udev->bus->sg_tablesize) {
		/*
		 * If the server's HCD doesn't support SG, break a single SG
		 * request into several URBs and map each SG list entry to
		 * corresponding URB buffer. The previously allocated SG
		 * list is stored in priv->sgl (If the server's HCD support SG,
		 * SG list is stored only in urb->sg) and it is used as an
		 * indicator that the server split single SG request into
		 * several URBs. Later, priv->sgl is used by stub_complete() and
		 * stub_send_ret_submit() to reassemble the divided URBs.
		 *
		 * NOTE(review): nents is only assigned when use_sg && buf_len
		 * above; if a client sends URB_DMA_MAP_SG with a zero
		 * transfer_buffer_length, num_urbs would read an
		 * uninitialized nents here — confirm whether that input is
		 * possible and guard if so.
		 */
		support_sg = 0;
		num_urbs = nents;
		priv->completed_urbs = 0;
		pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
	}

	/* allocate urb array */
	priv->num_urbs = num_urbs;
	priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
	if (!priv->urbs)
		goto err_urbs;

	/* setup a urb */
	if (support_sg) {
		/* isoc URBs need room for their packet descriptors */
		if (usb_pipeisoc(pipe))
			np = pdu->u.cmd_submit.number_of_packets;

		priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
		if (!priv->urbs[0])
			goto err_urb;

		if (buf_len) {
			if (use_sg) {
				priv->urbs[0]->sg = sgl;
				priv->urbs[0]->num_sgs = nents;
				priv->urbs[0]->transfer_buffer = NULL;
			} else {
				priv->urbs[0]->transfer_buffer = buffer;
			}
		}

		/* copy urb setup packet */
		priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
					8, GFP_KERNEL);
		if (!priv->urbs[0]->setup_packet) {
			/*
			 * NOTE(review): returns without freeing the buffers
			 * allocated above; presumably the ERROR_MALLOC event
			 * handler's cleanup path (stub_priv is already linked
			 * via stub_priv_alloc) releases them — verify.
			 */
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}

		usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
	} else {
		/* one URB per SG entry, each pointing into the SG buffer */
		for_each_sg(sgl, sg, nents, i) {
			priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
			/* The URBs which is previously allocated will be freed
			 * in stub_device_cleanup_urbs() if error occurs.
			 */
			if (!priv->urbs[i])
				goto err_urb;

			usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
			priv->urbs[i]->transfer_buffer = sg_virt(sg);
			priv->urbs[i]->transfer_buffer_length = sg->length;
		}
		priv->sgl = sgl;
	}

	for (i = 0; i < num_urbs; i++) {
		/* set other members from the base header of pdu */
		priv->urbs[i]->context = (void *) priv;
		priv->urbs[i]->dev = udev;
		priv->urbs[i]->pipe = pipe;
		priv->urbs[i]->complete = stub_complete;

		/* no need to submit an intercepted request, but harmless? */
		tweak_special_requests(priv->urbs[i]);

		masking_bogus_flags(priv->urbs[i]);
	}

	/* receive the OUT payload; errors are handled via usbip events */
	if (stub_recv_xbuff(ud, priv) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
		return;

	/* urb is now ready to submit */
	for (i = 0; i < priv->num_urbs; i++) {
		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);

		if (ret == 0)
			usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
					pdu->base.seqnum);
		else {
			dev_err(&udev->dev, "submit_urb error, %d\n", ret);
			usbip_dump_header(pdu);
			usbip_dump_urb(priv->urbs[i]);

			/*
			 * Pessimistic.
			 * This connection will be discarded.
			 */
			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
			break;
		}
	}

	usbip_dbg_stub_rx("Leave\n");
	return;

err_urb:
	kfree(priv->urbs);
err_urbs:
	kfree(buffer);
	sgl_free(sgl);
err_malloc:
	usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
}
611
612 /* recv a pdu */
613 static void stub_rx_pdu(struct usbip_device *ud)
614 {
615         int ret;
616         struct usbip_header pdu;
617         struct stub_device *sdev = container_of(ud, struct stub_device, ud);
618         struct device *dev = &sdev->udev->dev;
619
620         usbip_dbg_stub_rx("Enter\n");
621
622         memset(&pdu, 0, sizeof(pdu));
623
624         /* receive a pdu header */
625         ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
626         if (ret != sizeof(pdu)) {
627                 dev_err(dev, "recv a header, %d\n", ret);
628                 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
629                 return;
630         }
631
632         usbip_header_correct_endian(&pdu, 0);
633
634         if (usbip_dbg_flag_stub_rx)
635                 usbip_dump_header(&pdu);
636
637         if (!valid_request(sdev, &pdu)) {
638                 dev_err(dev, "recv invalid request\n");
639                 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
640                 return;
641         }
642
643         switch (pdu.base.command) {
644         case USBIP_CMD_UNLINK:
645                 stub_recv_cmd_unlink(sdev, &pdu);
646                 break;
647
648         case USBIP_CMD_SUBMIT:
649                 stub_recv_cmd_submit(sdev, &pdu);
650                 break;
651
652         default:
653                 /* NOTREACHED */
654                 dev_err(dev, "unknown pdu\n");
655                 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
656                 break;
657         }
658 }
659
/*
 * Kernel-thread entry point for the stub receive side: keep processing
 * pdus until the thread is asked to stop or a usbip event is pending.
 */
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	while (!kthread_should_stop() && !usbip_event_happened(ud))
		stub_rx_pdu(ud);

	return 0;
}