1 From 522f1499310d389e663a4e8dd0ccbb916b768766 Mon Sep 17 00:00:00 2001
2 From: Dave Stevenson <dave.stevenson@raspberrypi.org>
3 Date: Wed, 14 Feb 2018 17:04:26 +0000
4 Subject: [PATCH] staging: bcm2835-camera: Do not bulk receive from
7 vchi_bulk_queue_receive will queue up to a default of 4
8 bulk receives on a connection before blocking.
9 If called from the VCHI service_callback thread, then
10 that thread is unable to service the VCHI_CALLBACK_BULK_RECEIVED
11 events that would enable the queue call to succeed.
13 Add a workqueue to schedule the call to vchi_bulk_queue_receive
14 in an alternate context to avoid the lock up.
16 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
18 .../vc04_services/bcm2835-camera/mmal-vchiq.c | 101 ++++++++++--------
19 1 file changed, 59 insertions(+), 42 deletions(-)
21 --- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
22 +++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
23 @@ -118,8 +118,10 @@ struct mmal_msg_context {
27 - /* work struct for defered callback - must come first */
28 + /* work struct for buffer_cb callback */
29 struct work_struct work;
30 + /* work struct for deferred callback */
31 + struct work_struct buffer_to_host_work;
33 struct vchiq_mmal_instance *instance;
35 @@ -168,6 +170,9 @@ struct vchiq_mmal_instance {
36 /* component to use next */
38 struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
40 + /* ordered workqueue to process all bulk operations */
41 + struct workqueue_struct *bulk_wq;
44 static struct mmal_msg_context *
45 @@ -251,7 +256,44 @@ static void buffer_work_cb(struct work_s
46 msg_context->u.bulk.mmal_flags,
47 msg_context->u.bulk.dts,
48 msg_context->u.bulk.pts);
51 +/* workqueue scheduled callback to handle receiving buffers
53 + * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
54 + * If we block in the service_callback context then we can't process the
55 + * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
56 + * vchi_bulk_queue_receive() call to complete.
58 +static void buffer_to_host_work_cb(struct work_struct *work)
60 + struct mmal_msg_context *msg_context =
61 + container_of(work, struct mmal_msg_context,
62 + u.bulk.buffer_to_host_work);
63 + struct vchiq_mmal_instance *instance = msg_context->instance;
64 + unsigned long len = msg_context->u.bulk.buffer_used;
68 + /* Dummy receive to ensure the buffers remain in order */
70 + /* queue the bulk submission */
71 + vchi_service_use(instance->handle);
72 + ret = vchi_bulk_queue_receive(instance->handle,
73 + msg_context->u.bulk.buffer->buffer,
74 + /* Actual receive needs to be a multiple
78 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
79 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
82 + vchi_service_release(instance->handle);
85 + pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
86 + __func__, msg_context, ret);
89 /* enqueue a bulk receive for a given message context */
90 @@ -260,7 +302,6 @@ static int bulk_receive(struct vchiq_mma
91 struct mmal_msg_context *msg_context)
96 rd_len = msg->u.buffer_from_host.buffer_header.length;
98 @@ -294,45 +335,10 @@ static int bulk_receive(struct vchiq_mma
99 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
100 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
102 - /* queue the bulk submission */
103 - vchi_service_use(instance->handle);
104 - ret = vchi_bulk_queue_receive(instance->handle,
105 - msg_context->u.bulk.buffer->buffer,
106 - /* Actual receive needs to be a multiple
110 - VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
111 - VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
114 - vchi_service_release(instance->handle);
115 + queue_work(msg_context->instance->bulk_wq,
116 + &msg_context->u.bulk.buffer_to_host_work);
121 -/* enque a dummy bulk receive for a given message context */
122 -static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
123 - struct mmal_msg_context *msg_context)
127 - /* zero length indicates this was a dummy transfer */
128 - msg_context->u.bulk.buffer_used = 0;
130 - /* queue the bulk submission */
131 - vchi_service_use(instance->handle);
133 - ret = vchi_bulk_queue_receive(instance->handle,
134 - instance->bulk_scratch,
136 - VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
137 - VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
140 - vchi_service_release(instance->handle);
146 /* data in message, memcpy from packet into output buffer */
147 @@ -380,6 +386,8 @@ buffer_from_host(struct vchiq_mmal_insta
149 /* initialise work structure ready to schedule callback */
150 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
151 + INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
152 + buffer_to_host_work_cb);
154 atomic_inc(&port->buffers_with_vpu);
156 @@ -465,7 +473,7 @@ static void buffer_to_host_cb(struct vch
157 if (msg->u.buffer_from_host.buffer_header.flags &
158 MMAL_BUFFER_HEADER_FLAG_EOS) {
159 msg_context->u.bulk.status =
160 - dummy_bulk_receive(instance, msg_context);
161 + bulk_receive(instance, msg, msg_context);
162 if (msg_context->u.bulk.status == 0)
163 return; /* successful bulk submission, bulk
164 * completion will trigger callback
165 @@ -1789,6 +1797,9 @@ int vchiq_mmal_finalise(struct vchiq_mma
167 mutex_unlock(&instance->vchiq_mutex);
169 + flush_workqueue(instance->bulk_wq);
170 + destroy_workqueue(instance->bulk_wq);
172 vfree(instance->bulk_scratch);
174 idr_destroy(&instance->context_map);
175 @@ -1858,6 +1869,11 @@ int vchiq_mmal_init(struct vchiq_mmal_in
177 params.callback_param = instance;
179 + instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
181 + if (!instance->bulk_wq)
184 status = vchi_service_open(vchi_instance, ¶ms, &instance->handle);
186 pr_err("Failed to open VCHI service connection (status=%d)\n",
187 @@ -1872,8 +1888,9 @@ int vchiq_mmal_init(struct vchiq_mmal_in
192 vchi_service_close(instance->handle);
193 + destroy_workqueue(instance->bulk_wq);
195 vfree(instance->bulk_scratch);