brcm2708: add linux 4.19 support
[oweals/openwrt.git] target/linux/brcm2708/patches-4.19/950-0239-staging-bcm2835-camera-Do-not-bulk-receive-from-serv.patch
From 7cd2d38371edd4a04401c02c098a0e436816f3af Mon Sep 17 00:00:00 2001
From: Dave Stevenson <dave.stevenson@raspberrypi.org>
Date: Wed, 14 Feb 2018 17:04:26 +0000
Subject: [PATCH 239/703] staging: bcm2835-camera: Do not bulk receive from
 service thread

vchi_bulk_queue_receive will queue up to a default of 4
bulk receives on a connection before blocking.
If called from the VCHI service_callback thread, that thread
is unable to service the VCHI_CALLBACK_BULK_RECEIVED events
that would allow the blocked queue call to complete.

Add a workqueue to schedule the call to vchi_bulk_queue_receive
in an alternate context and avoid the lockup.

Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
---
 .../vc04_services/bcm2835-camera/mmal-vchiq.c | 101 ++++++++++--------
 1 file changed, 59 insertions(+), 42 deletions(-)

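Note for reviewers: the pattern applied by this patch, reduced to a minimal
standalone sketch. The my_* names and the my_blocking_bulk_receive()
placeholder are illustrative only; they stand in for the driver's
mmal_msg_context, bulk_wq and vchi_bulk_queue_receive() shown in the diff
below.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative per-buffer context; the driver's real one is mmal_msg_context. */
struct my_ctx {
	struct work_struct bulk_work;	/* deferred bulk receive */
};

static struct workqueue_struct *bulk_wq;

/* Placeholder for the blocking vchi_bulk_queue_receive() submission. */
static void my_blocking_bulk_receive(struct my_ctx *ctx)
{
}

static void bulk_work_cb(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, bulk_work);

	/* Runs on the workqueue, not on the VCHI service thread, so that
	 * thread stays free to deliver the VCHI_CALLBACK_BULK_RECEIVED
	 * events which unblock the queueing call.
	 */
	my_blocking_bulk_receive(ctx);
}

/* Setup: one ordered workqueue keeps bulk receives in submission order. */
static int my_setup(struct my_ctx *ctx)
{
	bulk_wq = alloc_ordered_workqueue("my-bulk-wq", WQ_MEM_RECLAIM);
	if (!bulk_wq)
		return -ENOMEM;
	INIT_WORK(&ctx->bulk_work, bulk_work_cb);
	return 0;
}

/* Called from the VCHI service callback: never block here, only defer. */
static void my_service_cb(struct my_ctx *ctx)
{
	queue_work(bulk_wq, &ctx->bulk_work);
}

/* Teardown: destroy_workqueue() drains any pending work before freeing. */
static void my_teardown(void)
{
	destroy_workqueue(bulk_wq);
}

alloc_ordered_workqueue() creates a workqueue that executes at most one work
item at a time, in queueing order, which is what keeps the deferred bulk
receives submitted in the same order as the buffer headers arrived.
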
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -118,8 +118,10 @@ struct mmal_msg_context {
 
 	union {
 		struct {
-			/* work struct for defered callback - must come first */
+			/* work struct for buffer_cb callback */
 			struct work_struct work;
+			/* work struct for deferred callback */
+			struct work_struct buffer_to_host_work;
 			/* mmal instance */
 			struct vchiq_mmal_instance *instance;
 			/* mmal port */
@@ -167,6 +169,9 @@ struct vchiq_mmal_instance {
 	/* component to use next */
 	int component_idx;
 	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
+
+	/* ordered workqueue to process all bulk operations */
+	struct workqueue_struct *bulk_wq;
 };
 
 static struct mmal_msg_context *
@@ -248,7 +253,44 @@ static void buffer_work_cb(struct work_s
 					    msg_context->u.bulk.mmal_flags,
 					    msg_context->u.bulk.dts,
 					    msg_context->u.bulk.pts);
+}
 
+/* workqueue scheduled callback to handle receiving buffers
+ *
+ * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
+ * If we block in the service_callback context then we can't process the
+ * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
+ * vchi_bulk_queue_receive() call to complete.
+ */
+static void buffer_to_host_work_cb(struct work_struct *work)
+{
+	struct mmal_msg_context *msg_context =
+		container_of(work, struct mmal_msg_context,
+			     u.bulk.buffer_to_host_work);
+	struct vchiq_mmal_instance *instance = msg_context->instance;
+	unsigned long len = msg_context->u.bulk.buffer_used;
+	int ret;
+
+	if (!len)
+		/* Dummy receive to ensure the buffers remain in order */
+		len = 8;
+	/* queue the bulk submission */
+	vchi_service_use(instance->handle);
+	ret = vchi_bulk_queue_receive(instance->handle,
+				      msg_context->u.bulk.buffer->buffer,
+				      /* Actual receive needs to be a multiple
+				       * of 4 bytes
+				       */
+				      (len + 3) & ~3,
+				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+				      msg_context);
+
+	vchi_service_release(instance->handle);
+
+	if (ret != 0)
+		pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
+		       __func__, msg_context, ret);
 }
 
 /* enqueue a bulk receive for a given message context */
@@ -257,7 +299,6 @@ static int bulk_receive(struct vchiq_mma
 			struct mmal_msg_context *msg_context)
 {
 	unsigned long rd_len;
-	int ret;
 
 	rd_len = msg->u.buffer_from_host.buffer_header.length;
 
@@ -293,45 +334,10 @@ static int bulk_receive(struct vchiq_mma
 	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
 	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
 
-	/* queue the bulk submission */
-	vchi_service_use(instance->handle);
-	ret = vchi_bulk_queue_receive(instance->handle,
-				      msg_context->u.bulk.buffer->buffer,
-				      /* Actual receive needs to be a multiple
-				       * of 4 bytes
-				       */
-				      (rd_len + 3) & ~3,
-				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
-				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
-				      msg_context);
-
-	vchi_service_release(instance->handle);
+	queue_work(msg_context->instance->bulk_wq,
+		   &msg_context->u.bulk.buffer_to_host_work);
 
-	return ret;
-}
-
-/* enque a dummy bulk receive for a given message context */
-static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
-			      struct mmal_msg_context *msg_context)
-{
-	int ret;
-
-	/* zero length indicates this was a dummy transfer */
-	msg_context->u.bulk.buffer_used = 0;
-
-	/* queue the bulk submission */
-	vchi_service_use(instance->handle);
-
-	ret = vchi_bulk_queue_receive(instance->handle,
-				      instance->bulk_scratch,
-				      8,
-				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
-				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
-				      msg_context);
-
-	vchi_service_release(instance->handle);
-
-	return ret;
+	return 0;
 }
 
 /* data in message, memcpy from packet into output buffer */
@@ -379,6 +385,8 @@ buffer_from_host(struct vchiq_mmal_insta
 
 	/* initialise work structure ready to schedule callback */
 	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
+	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
+		  buffer_to_host_work_cb);
 
 	/* prep the buffer from host message */
 	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */
@@ -459,7 +467,7 @@ static void buffer_to_host_cb(struct vch
 		if (msg->u.buffer_from_host.buffer_header.flags &
 		    MMAL_BUFFER_HEADER_FLAG_EOS) {
 			msg_context->u.bulk.status =
-			    dummy_bulk_receive(instance, msg_context);
+			    bulk_receive(instance, msg, msg_context);
 			if (msg_context->u.bulk.status == 0)
 				return; /* successful bulk submission, bulk
 					 * completion will trigger callback
@@ -1793,6 +1801,9 @@ int vchiq_mmal_finalise(struct vchiq_mma
 
 	mutex_unlock(&instance->vchiq_mutex);
 
+	flush_workqueue(instance->bulk_wq);
+	destroy_workqueue(instance->bulk_wq);
+
 	vfree(instance->bulk_scratch);
 
 	idr_destroy(&instance->context_map);
@@ -1862,6 +1873,11 @@ int vchiq_mmal_init(struct vchiq_mmal_in
 
 	params.callback_param = instance;
 
+	instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
+						    WQ_MEM_RECLAIM);
+	if (!instance->bulk_wq)
+		goto err_free;
+
 	status = vchi_service_open(vchi_instance, &params, &instance->handle);
 	if (status) {
 		pr_err("Failed to open VCHI service connection (status=%d)\n",
@@ -1876,8 +1892,9 @@ int vchiq_mmal_init(struct vchiq_mmal_in
 	return 0;
 
 err_close_services:
-
 	vchi_service_close(instance->handle);
+	destroy_workqueue(instance->bulk_wq);
+err_free:
	vfree(instance->bulk_scratch);
	kfree(instance);
	return -ENODEV;
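
For reference, a worked example of the 4-byte rounding used in
buffer_to_host_work_cb() above ((len + 3) & ~3); this is a standalone
userspace snippet, not part of the patch:

#include <stdio.h>

/* Round a receive length up to the next multiple of 4, as done with
 * (len + 3) & ~3 before the bulk receive is queued.
 */
static unsigned long round_up_to_4(unsigned long len)
{
	return (len + 3) & ~3UL;
}

int main(void)
{
	/* 5 -> 8, 6 -> 8, 8 -> 8 (already aligned); a zero-length dummy
	 * transfer has already been bumped to len = 8 by the work callback.
	 */
	printf("%lu %lu %lu\n", round_up_to_4(5), round_up_to_4(6),
	       round_up_to_4(8));
	return 0;
}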