1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BCM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *              (now dave.stevenson@raspberrypi.org)
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/mutex.h>
21 #include <linux/mm.h>
22 #include <linux/slab.h>
23 #include <linux/completion.h>
24 #include <linux/vmalloc.h>
25 #include <media/videobuf2-vmalloc.h>
26
27 #include "mmal-common.h"
28 #include "mmal-vchiq.h"
29 #include "mmal-msg.h"
30
31 #define USE_VCHIQ_ARM
32 #include "interface/vchi/vchi.h"
33
34 /* maximum number of components supported */
35 #define VCHIQ_MMAL_MAX_COMPONENTS 4
36
37 /*#define FULL_MSG_DUMP 1*/
38
39 #ifdef DEBUG
40 static const char *const msg_type_names[] = {
41         "UNKNOWN",
42         "QUIT",
43         "SERVICE_CLOSED",
44         "GET_VERSION",
45         "COMPONENT_CREATE",
46         "COMPONENT_DESTROY",
47         "COMPONENT_ENABLE",
48         "COMPONENT_DISABLE",
49         "PORT_INFO_GET",
50         "PORT_INFO_SET",
51         "PORT_ACTION",
52         "BUFFER_FROM_HOST",
53         "BUFFER_TO_HOST",
54         "GET_STATS",
55         "PORT_PARAMETER_SET",
56         "PORT_PARAMETER_GET",
57         "EVENT_TO_HOST",
58         "GET_CORE_STATS_FOR_PORT",
59         "OPAQUE_ALLOCATOR",
60         "CONSUME_MEM",
61         "LMK",
62         "OPAQUE_ALLOCATOR_DESC",
63         "DRM_GET_LHS32",
64         "DRM_GET_TIME",
65         "BUFFER_FROM_HOST_ZEROLEN",
66         "PORT_FLUSH",
67         "HOST_LOG",
68 };
69 #endif
70
71 static const char *const port_action_type_names[] = {
72         "UNKNOWN",
73         "ENABLE",
74         "DISABLE",
75         "FLUSH",
76         "CONNECT",
77         "DISCONNECT",
78         "SET_REQUIREMENTS",
79 };
80
81 #if defined(DEBUG)
82 #if defined(FULL_MSG_DUMP)
83 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
84         do {                                                            \
85                 pr_debug(TITLE" type:%s(%d) length:%d\n",               \
86                          msg_type_names[(MSG)->h.type],                 \
87                          (MSG)->h.type, (MSG_LEN));                     \
88                 print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
89                                16, 4, (MSG),                            \
90                                sizeof(struct mmal_msg_header), 1);      \
91                 print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
92                                16, 4,                                   \
93                                ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
94                                (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
95         } while (0)
96 #else
97 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
98         do {                                                            \
99                 pr_debug(TITLE" type:%s(%d) length:%d\n",               \
100                          msg_type_names[(MSG)->h.type],                 \
101                          (MSG)->h.type, (MSG_LEN));                     \
102         } while (0)
103 #endif
104 #else
105 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
106 #endif
107
108 struct vchiq_mmal_instance;
109
110 /* normal message context */
111 struct mmal_msg_context {
112         struct vchiq_mmal_instance *instance;
113
114         /* Index in the context_map idr so that we can find the
115          * mmal_msg_context again when servicing the VCHI reply.
116          */
117         int handle;
118
119         union {
120                 struct {
121                         /* work struct for buffer_cb callback */
122                         struct work_struct work;
123                         /* work struct for deferred callback */
124                         struct work_struct buffer_to_host_work;
125                         /* mmal instance */
126                         struct vchiq_mmal_instance *instance;
127                         /* mmal port */
128                         struct vchiq_mmal_port *port;
129                         /* actual buffer used to store bulk reply */
130                         struct mmal_buffer *buffer;
131                         /* amount of buffer used */
132                         unsigned long buffer_used;
133                         /* MMAL buffer flags */
134                         u32 mmal_flags;
135                         /* Presentation and Decode timestamps */
136                         s64 pts;
137                         s64 dts;
138
139                         int status;     /* context status */
140
141                 } bulk;         /* bulk data */
142
143                 struct {
144                         /* message handle to release */
145                         struct vchi_held_msg msg_handle;
146                         /* pointer to received message */
147                         struct mmal_msg *msg;
148                         /* received message length */
149                         u32 msg_len;
150                         /* completion upon reply */
151                         struct completion cmplt;
152                 } sync;         /* synchronous response */
153         } u;
154
155 };
156
157 struct vchiq_mmal_instance {
158         VCHI_SERVICE_HANDLE_T handle;
159
160         /* ensure serialised access to service */
161         struct mutex vchiq_mutex;
162
163         /* vmalloc page to receive scratch bulk xfers into */
164         void *bulk_scratch;
165
166         struct idr context_map;
167         /* protect accesses to context_map */
168         struct mutex context_map_lock;
169
170         /* component to use next */
171         int component_idx;
172         struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
173
174         /* ordered workqueue to process all bulk operations */
175         struct workqueue_struct *bulk_wq;
176 };
177
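/* allocate a message context and register it in the context_map idr so that
 * the handle carried in the outgoing message lets us find the context again
 * when the reply is serviced
 */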
178 static struct mmal_msg_context *
179 get_msg_context(struct vchiq_mmal_instance *instance)
180 {
181         struct mmal_msg_context *msg_context;
182         int handle;
183
184         /* todo: should this be allocated from a pool to avoid kzalloc */
185         msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
186
187         if (!msg_context)
188                 return ERR_PTR(-ENOMEM);
189
190         /* Create an ID that will be passed along with our message so
191          * that when we service the VCHI reply, we can look up what
192          * message is being replied to.
193          */
194         mutex_lock(&instance->context_map_lock);
195         handle = idr_alloc(&instance->context_map, msg_context,
196                            0, 0, GFP_KERNEL);
197         mutex_unlock(&instance->context_map_lock);
198
199         if (handle < 0) {
200                 kfree(msg_context);
201                 return ERR_PTR(handle);
202         }
203
204         msg_context->instance = instance;
205         msg_context->handle = handle;
206
207         return msg_context;
208 }
209
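/* look up a message context previously registered by get_msg_context() */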
210 static struct mmal_msg_context *
211 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
212 {
213         return idr_find(&instance->context_map, handle);
214 }
215
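/* remove a message context from the context_map idr and free it */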
216 static void
217 release_msg_context(struct mmal_msg_context *msg_context)
218 {
219         struct vchiq_mmal_instance *instance = msg_context->instance;
220
221         mutex_lock(&instance->context_map_lock);
222         idr_remove(&instance->context_map, msg_context->handle);
223         mutex_unlock(&instance->context_map_lock);
224         kfree(msg_context);
225 }
226
227 /* deals with receipt of event to host message */
228 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
229                              struct mmal_msg *msg, u32 msg_len)
230 {
231         pr_debug("unhandled event\n");
232         pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
233                  msg->u.event_to_host.client_component,
234                  msg->u.event_to_host.port_type,
235                  msg->u.event_to_host.port_num,
236                  msg->u.event_to_host.cmd, msg->u.event_to_host.length);
237 }
238
239 /* workqueue scheduled callback
240  *
241  * we do this because it is important we do not call any other vchiq
242  * sync calls from within the message delivery thread
243  */
244 static void buffer_work_cb(struct work_struct *work)
245 {
246         struct mmal_msg_context *msg_context =
247                 container_of(work, struct mmal_msg_context, u.bulk.work);
248
249         atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
250
251         msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
252                                             msg_context->u.bulk.port,
253                                             msg_context->u.bulk.status,
254                                             msg_context->u.bulk.buffer,
255                                             msg_context->u.bulk.buffer_used,
256                                             msg_context->u.bulk.mmal_flags,
257                                             msg_context->u.bulk.dts,
258                                             msg_context->u.bulk.pts);
259 }
260
261 /* workqueue scheduled callback to handle receiving buffers
262  *
263  * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
264  * If we block in the service_callback context then we can't process the
265  * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
266  * vchi_bulk_queue_receive() call to complete.
267  */
268 static void buffer_to_host_work_cb(struct work_struct *work)
269 {
270         struct mmal_msg_context *msg_context =
271                 container_of(work, struct mmal_msg_context,
272                              u.bulk.buffer_to_host_work);
273         struct vchiq_mmal_instance *instance = msg_context->instance;
274         unsigned long len = msg_context->u.bulk.buffer_used;
275         int ret;
276
277         if (!len)
278                 /* Dummy receive to ensure the buffers remain in order */
279                 len = 8;
280         /* queue the bulk submission */
281         vchi_service_use(instance->handle);
282         ret = vchi_bulk_queue_receive(instance->handle,
283                                       msg_context->u.bulk.buffer->buffer,
284                                       /* Actual receive needs to be a multiple
285                                        * of 4 bytes
286                                        */
287                                       (len + 3) & ~3,
288                                       VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
289                                       VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
290                                       msg_context);
291
292         vchi_service_release(instance->handle);
293
294         if (ret != 0)
295                 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
296                        __func__, msg_context, ret);
297 }
298
299 /* enqueue a bulk receive for a given message context */
300 static int bulk_receive(struct vchiq_mmal_instance *instance,
301                         struct mmal_msg *msg,
302                         struct mmal_msg_context *msg_context)
303 {
304         unsigned long rd_len;
305
306         rd_len = msg->u.buffer_from_host.buffer_header.length;
307
308         if (!msg_context->u.bulk.buffer) {
309                 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
310
311                 /* todo: this is a serious error, we should never have
312                  * committed a buffer_to_host operation to the mmal
313                  * port without the buffer to back it up (underflow
314                  * handling) and there is no obvious way to deal with
315                  * this - how is the mmal service going to react when
316                  * we fail to do the xfer and reschedule a buffer when
317                  * it arrives? perhaps a starved flag to indicate a
318                  * waiting bulk receive?
319                  */
320
321                 return -EINVAL;
322         }
323
324         /* ensure we do not overrun the available buffer */
325         if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
326                 rd_len = msg_context->u.bulk.buffer->buffer_size;
327                 pr_warn("short read as not enough receive buffer space\n");
328                 /* todo: is this the correct response, what happens to
329                  * the rest of the message data?
330                  */
331         }
332
333         /* store length */
334         msg_context->u.bulk.buffer_used = rd_len;
335         msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
336         msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
337
338         queue_work(msg_context->instance->bulk_wq,
339                    &msg_context->u.bulk.buffer_to_host_work);
340
341         return 0;
342 }
343
344 /* data in message, memcpy from packet into output buffer */
345 static int inline_receive(struct vchiq_mmal_instance *instance,
346                           struct mmal_msg *msg,
347                           struct mmal_msg_context *msg_context)
348 {
349         memcpy(msg_context->u.bulk.buffer->buffer,
350                msg->u.buffer_from_host.short_data,
351                msg->u.buffer_from_host.payload_in_message);
352
353         msg_context->u.bulk.buffer_used =
354             msg->u.buffer_from_host.payload_in_message;
355
356         return 0;
357 }
358
359 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
360 static int
361 buffer_from_host(struct vchiq_mmal_instance *instance,
362                  struct vchiq_mmal_port *port, struct mmal_buffer *buf)
363 {
364         struct mmal_msg_context *msg_context;
365         struct mmal_msg m;
366         int ret;
367
368         if (!port->enabled)
369                 return -EINVAL;
370
371         pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
372
373         /* get context */
374         if (!buf->msg_context) {
375                 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
376                        buf);
377                 return -EINVAL;
378         }
379         msg_context = buf->msg_context;
380
381         /* store bulk message context for when data arrives */
382         msg_context->u.bulk.instance = instance;
383         msg_context->u.bulk.port = port;
384         msg_context->u.bulk.buffer = buf;
385         msg_context->u.bulk.buffer_used = 0;
386
387         /* initialise work structure ready to schedule callback */
388         INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
389         INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
390                   buffer_to_host_work_cb);
391
392         atomic_inc(&port->buffers_with_vpu);
393
394         /* prep the buffer from host message */
395         memset(&m, 0xbc, sizeof(m));    /* just to make debug clearer */
396
397         m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
398         m.h.magic = MMAL_MAGIC;
399         m.h.context = msg_context->handle;
400         m.h.status = 0;
401
402         /* drvbuf is our private data passed back */
403         m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
404         m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
405         m.u.buffer_from_host.drvbuf.port_handle = port->handle;
406         m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
407
408         /* buffer header */
409         m.u.buffer_from_host.buffer_header.cmd = 0;
410         m.u.buffer_from_host.buffer_header.data =
411                 (u32)(unsigned long)buf->buffer;
412         m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
413         m.u.buffer_from_host.buffer_header.length = 0;  /* nothing used yet */
414         m.u.buffer_from_host.buffer_header.offset = 0;  /* no offset */
415         m.u.buffer_from_host.buffer_header.flags = 0;   /* no flags */
416         m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
417         m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
418
419         /* clear buffer type specific data */
420         memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
421                sizeof(m.u.buffer_from_host.buffer_header_type_specific));
422
423         /* no payload in message */
424         m.u.buffer_from_host.payload_in_message = 0;
425
426         vchi_service_use(instance->handle);
427
428         ret = vchi_queue_kernel_message(instance->handle,
429                                         &m,
430                                         sizeof(struct mmal_msg_header) +
431                                         sizeof(m.u.buffer_from_host));
432
433         vchi_service_release(instance->handle);
434
435         return ret;
436 }
437
438 /* deals with receipt of buffer to host message */
439 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
440                               struct mmal_msg *msg, u32 msg_len)
441 {
442         struct mmal_msg_context *msg_context;
443         u32 handle;
444
445         pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
446                  __func__, instance, msg, msg_len);
447
448         if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
449                 handle = msg->u.buffer_from_host.drvbuf.client_context;
450                 msg_context = lookup_msg_context(instance, handle);
451
452                 if (!msg_context) {
453                         pr_err("drvbuf.client_context(%u) is invalid\n",
454                                handle);
455                         return;
456                 }
457         } else {
458                 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
459                 return;
460         }
461
462         msg_context->u.bulk.mmal_flags =
463                                 msg->u.buffer_from_host.buffer_header.flags;
464
465         if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
466                 /* message reception had an error */
467                 pr_warn("error %d in reply\n", msg->h.status);
468
469                 msg_context->u.bulk.status = msg->h.status;
470
471         } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
472                 /* empty buffer */
473                 if (msg->u.buffer_from_host.buffer_header.flags &
474                     MMAL_BUFFER_HEADER_FLAG_EOS) {
475                         msg_context->u.bulk.status =
476                             bulk_receive(instance, msg, msg_context);
477                         if (msg_context->u.bulk.status == 0)
478                                 return; /* successful bulk submission, bulk
479                                          * completion will trigger callback
480                                          */
481                 } else {
482                         /* do callback with empty buffer - not EOS though */
483                         msg_context->u.bulk.status = 0;
484                         msg_context->u.bulk.buffer_used = 0;
485                 }
486         } else if (msg->u.buffer_from_host.payload_in_message == 0) {
487                 /* data is not in message, queue a bulk receive */
488                 msg_context->u.bulk.status =
489                     bulk_receive(instance, msg, msg_context);
490                 if (msg_context->u.bulk.status == 0)
491                         return; /* successful bulk submission, bulk
492                                  * completion will trigger callback
493                                  */
494
495                 /* failed to submit buffer, this will end badly */
496                 pr_err("error %d on bulk submission\n",
497                        msg_context->u.bulk.status);
498
499         } else if (msg->u.buffer_from_host.payload_in_message <=
500                    MMAL_VC_SHORT_DATA) {
501                 /* data payload within message */
502                 msg_context->u.bulk.status = inline_receive(instance, msg,
503                                                             msg_context);
504         } else {
505                 pr_err("message with invalid short payload\n");
506
507                 /* signal error */
508                 msg_context->u.bulk.status = -EINVAL;
509                 msg_context->u.bulk.buffer_used =
510                     msg->u.buffer_from_host.payload_in_message;
511         }
512
513         /* schedule the port callback */
514         schedule_work(&msg_context->u.bulk.work);
515 }
516
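/* bulk receive completed - record success and schedule the deferred port callback */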
517 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
518                             struct mmal_msg_context *msg_context)
519 {
520         msg_context->u.bulk.status = 0;
521
522         /* schedule the port callback */
523         schedule_work(&msg_context->u.bulk.work);
524 }
525
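/* bulk receive was aborted - record the error and schedule the deferred port callback */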
526 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
527                           struct mmal_msg_context *msg_context)
528 {
529         pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
530
531         msg_context->u.bulk.status = -EINTR;
532
533         schedule_work(&msg_context->u.bulk.work);
534 }
535
536 /* incoming event service callback */
537 static void service_callback(void *param,
538                              const VCHI_CALLBACK_REASON_T reason,
539                              void *bulk_ctx)
540 {
541         struct vchiq_mmal_instance *instance = param;
542         int status;
543         u32 msg_len;
544         struct mmal_msg *msg;
545         struct vchi_held_msg msg_handle;
546         struct mmal_msg_context *msg_context;
547
548         if (!instance) {
549                 pr_err("Message callback passed NULL instance\n");
550                 return;
551         }
552
553         switch (reason) {
554         case VCHI_CALLBACK_MSG_AVAILABLE:
555                 status = vchi_msg_hold(instance->handle, (void **)&msg,
556                                        &msg_len, VCHI_FLAGS_NONE, &msg_handle);
557                 if (status) {
558                         pr_err("Unable to dequeue a message (%d)\n", status);
559                         break;
560                 }
561
562                 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
563
564                 /* handling is different for buffer messages */
565                 switch (msg->h.type) {
566                 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
567                         vchi_held_msg_release(&msg_handle);
568                         break;
569
570                 case MMAL_MSG_TYPE_EVENT_TO_HOST:
571                         event_to_host_cb(instance, msg, msg_len);
572                         vchi_held_msg_release(&msg_handle);
573
574                         break;
575
576                 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
577                         buffer_to_host_cb(instance, msg, msg_len);
578                         vchi_held_msg_release(&msg_handle);
579                         break;
580
581                 default:
582                         /* messages dependent on header context to complete */
583                         if (!msg->h.context) {
584                                 pr_err("received message context was null!\n");
585                                 vchi_held_msg_release(&msg_handle);
586                                 break;
587                         }
588
589                         msg_context = lookup_msg_context(instance,
590                                                          msg->h.context);
591                         if (!msg_context) {
592                                 pr_err("received invalid message context %u!\n",
593                                        msg->h.context);
594                                 vchi_held_msg_release(&msg_handle);
595                                 break;
596                         }
597
598                         /* fill in context values */
599                         msg_context->u.sync.msg_handle = msg_handle;
600                         msg_context->u.sync.msg = msg;
601                         msg_context->u.sync.msg_len = msg_len;
602
603                         /* todo: should this check (completion_done()
604                          * == 1) for no one waiting? Or do we need a
605                          * flag to tell us the completion has been
606                          * interrupted, so we can free the message and
607                          * its context? That would probably also address
608                          * the todo below about a message arriving after
609                          * interruption.
610                          */
611
612                         /* complete message so caller knows it happened */
613                         complete(&msg_context->u.sync.cmplt);
614                         break;
615                 }
616
617                 break;
618
619         case VCHI_CALLBACK_BULK_RECEIVED:
620                 bulk_receive_cb(instance, bulk_ctx);
621                 break;
622
623         case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
624                 bulk_abort_cb(instance, bulk_ctx);
625                 break;
626
627         case VCHI_CALLBACK_SERVICE_CLOSED:
628                 /* TODO: consider if this requires action if received when
629                  * driver is not explicitly closing the service
630                  */
631                 break;
632
633         default:
634                 pr_err("Received unhandled message reason %d\n", reason);
635                 break;
636         }
637 }
638
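/* Send a message to the VPU and block, with a timeout, waiting for the reply.
 *
 * A message context is allocated so that service_callback() can match the
 * reply to this request via the handle placed in msg->h.context. On success
 * the held reply message is passed back to the caller, who must release it
 * with vchi_held_msg_release() once the reply payload has been consumed.
 */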
639 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
640                                      struct mmal_msg *msg,
641                                      unsigned int payload_len,
642                                      struct mmal_msg **msg_out,
643                                      struct vchi_held_msg *msg_handle_out)
644 {
645         struct mmal_msg_context *msg_context;
646         int ret;
647         unsigned long timeout;
648
649         /* payload size must not cause message to exceed max size */
650         if (payload_len >
651             (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
652                 pr_err("payload length %d exceeds max:%d\n", payload_len,
653                        (int)(MMAL_MSG_MAX_SIZE -
654                             sizeof(struct mmal_msg_header)));
655                 return -EINVAL;
656         }
657
658         msg_context = get_msg_context(instance);
659         if (IS_ERR(msg_context))
660                 return PTR_ERR(msg_context);
661
662         init_completion(&msg_context->u.sync.cmplt);
663
664         msg->h.magic = MMAL_MAGIC;
665         msg->h.context = msg_context->handle;
666         msg->h.status = 0;
667
668         DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
669                      ">>> sync message");
670
671         vchi_service_use(instance->handle);
672
673         ret = vchi_queue_kernel_message(instance->handle,
674                                         msg,
675                                         sizeof(struct mmal_msg_header) +
676                                         payload_len);
677
678         vchi_service_release(instance->handle);
679
680         if (ret) {
681                 pr_err("error %d queuing message\n", ret);
682                 release_msg_context(msg_context);
683                 return ret;
684         }
685
686         timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
687                                               3 * HZ);
688         if (timeout == 0) {
689                 pr_err("timed out waiting for sync completion\n");
690                 ret = -ETIME;
691                 /* todo: what happens if the message arrives after aborting */
692                 release_msg_context(msg_context);
693                 return ret;
694         }
695
696         *msg_out = msg_context->u.sync.msg;
697         *msg_handle_out = msg_context->u.sync.msg_handle;
698         release_msg_context(msg_context);
699
700         return 0;
701 }
702
703 static void dump_port_info(struct vchiq_mmal_port *port)
704 {
705         pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
706
707         pr_debug("buffer minimum num:%d size:%d align:%d\n",
708                  port->minimum_buffer.num,
709                  port->minimum_buffer.size, port->minimum_buffer.alignment);
710
711         pr_debug("buffer recommended num:%d size:%d align:%d\n",
712                  port->recommended_buffer.num,
713                  port->recommended_buffer.size,
714                  port->recommended_buffer.alignment);
715
716         pr_debug("buffer current values num:%d size:%d align:%d\n",
717                  port->current_buffer.num,
718                  port->current_buffer.size, port->current_buffer.alignment);
719
720         pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
721                  port->format.type,
722                  port->format.encoding, port->format.encoding_variant);
723
724         pr_debug("                  bitrate:%d flags:0x%x\n",
725                  port->format.bitrate, port->format.flags);
726
727         if (port->format.type == MMAL_ES_TYPE_VIDEO) {
728                 pr_debug
729                     ("es video format: width:%d height:%d colourspace:0x%x\n",
730                      port->es.video.width, port->es.video.height,
731                      port->es.video.color_space);
732
733                 pr_debug("               : crop xywh %d,%d,%d,%d\n",
734                          port->es.video.crop.x,
735                          port->es.video.crop.y,
736                          port->es.video.crop.width, port->es.video.crop.height);
737                 pr_debug("               : framerate %d/%d  aspect %d/%d\n",
738                          port->es.video.frame_rate.num,
739                          port->es.video.frame_rate.den,
740                          port->es.video.par.num, port->es.video.par.den);
741         }
742 }
743
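/* populate the wire-format struct mmal_port from the host-side port state */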
744 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
745 {
746         /* todo: do read-only fields need setting at all? */
747         p->type = port->type;
748         p->index = port->index;
749         p->index_all = 0;
750         p->is_enabled = port->enabled;
751         p->buffer_num_min = port->minimum_buffer.num;
752         p->buffer_size_min = port->minimum_buffer.size;
753         p->buffer_alignment_min = port->minimum_buffer.alignment;
754         p->buffer_num_recommended = port->recommended_buffer.num;
755         p->buffer_size_recommended = port->recommended_buffer.size;
756
757         /* only three writable fields in a port */
758         p->buffer_num = port->current_buffer.num;
759         p->buffer_size = port->current_buffer.size;
760         p->userdata = (u32)(unsigned long)port;
761 }
762
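/* use port info set message to push the local port and format configuration to the VPU */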
763 static int port_info_set(struct vchiq_mmal_instance *instance,
764                          struct vchiq_mmal_port *port)
765 {
766         int ret;
767         struct mmal_msg m;
768         struct mmal_msg *rmsg;
769         struct vchi_held_msg rmsg_handle;
770
771         pr_debug("setting port info port %p\n", port);
772         if (!port)
773                 return -1;
774         dump_port_info(port);
775
776         m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
777
778         m.u.port_info_set.component_handle = port->component->handle;
779         m.u.port_info_set.port_type = port->type;
780         m.u.port_info_set.port_index = port->index;
781
782         port_to_mmal_msg(port, &m.u.port_info_set.port);
783
784         /* elementary stream format setup */
785         m.u.port_info_set.format.type = port->format.type;
786         m.u.port_info_set.format.encoding = port->format.encoding;
787         m.u.port_info_set.format.encoding_variant =
788             port->format.encoding_variant;
789         m.u.port_info_set.format.bitrate = port->format.bitrate;
790         m.u.port_info_set.format.flags = port->format.flags;
791
792         memcpy(&m.u.port_info_set.es, &port->es,
793                sizeof(union mmal_es_specific_format));
794
795         m.u.port_info_set.format.extradata_size = port->format.extradata_size;
796         memcpy(&m.u.port_info_set.extradata, port->format.extradata,
797                port->format.extradata_size);
798
799         ret = send_synchronous_mmal_msg(instance, &m,
800                                         sizeof(m.u.port_info_set),
801                                         &rmsg, &rmsg_handle);
802         if (ret)
803                 return ret;
804
805         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
806                 /* got an unexpected message type in reply */
807                 ret = -EINVAL;
808                 goto release_msg;
809         }
810
811         /* return operation status */
812         ret = -rmsg->u.port_info_get_reply.status;
813
814         pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
815                  port->component->handle, port->handle);
816
817 release_msg:
818         vchi_held_msg_release(&rmsg_handle);
819
820         return ret;
821 }
822
823 /* use port info get message to retrieve port information */
824 static int port_info_get(struct vchiq_mmal_instance *instance,
825                          struct vchiq_mmal_port *port)
826 {
827         int ret;
828         struct mmal_msg m;
829         struct mmal_msg *rmsg;
830         struct vchi_held_msg rmsg_handle;
831
832         /* build the port info get message */
833         m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
834         m.u.port_info_get.component_handle = port->component->handle;
835         m.u.port_info_get.port_type = port->type;
836         m.u.port_info_get.index = port->index;
837
838         ret = send_synchronous_mmal_msg(instance, &m,
839                                         sizeof(m.u.port_info_get),
840                                         &rmsg, &rmsg_handle);
841         if (ret)
842                 return ret;
843
844         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
845                 /* got an unexpected message type in reply */
846                 ret = -EINVAL;
847                 goto release_msg;
848         }
849
850         /* return operation status */
851         ret = -rmsg->u.port_info_get_reply.status;
852         if (ret != MMAL_MSG_STATUS_SUCCESS)
853                 goto release_msg;
854
855         if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
856                 port->enabled = 0;
857         else
858                 port->enabled = 1;
859
860         /* copy the values out of the message */
861         port->handle = rmsg->u.port_info_get_reply.port_handle;
862
863         /* port type and index cached to use on port info set because
864          * it does not use a port handle
865          */
866         port->type = rmsg->u.port_info_get_reply.port_type;
867         port->index = rmsg->u.port_info_get_reply.port_index;
868
869         port->minimum_buffer.num =
870             rmsg->u.port_info_get_reply.port.buffer_num_min;
871         port->minimum_buffer.size =
872             rmsg->u.port_info_get_reply.port.buffer_size_min;
873         port->minimum_buffer.alignment =
874             rmsg->u.port_info_get_reply.port.buffer_alignment_min;
875
876         port->recommended_buffer.alignment =
877             rmsg->u.port_info_get_reply.port.buffer_alignment_min;
878         port->recommended_buffer.num =
879             rmsg->u.port_info_get_reply.port.buffer_num_recommended;
880
881         port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
882         port->current_buffer.size =
883             rmsg->u.port_info_get_reply.port.buffer_size;
884
885         /* stream format */
886         port->format.type = rmsg->u.port_info_get_reply.format.type;
887         port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
888         port->format.encoding_variant =
889             rmsg->u.port_info_get_reply.format.encoding_variant;
890         port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
891         port->format.flags = rmsg->u.port_info_get_reply.format.flags;
892
893         /* elementary stream format */
894         memcpy(&port->es,
895                &rmsg->u.port_info_get_reply.es,
896                sizeof(union mmal_es_specific_format));
897         port->format.es = &port->es;
898
899         port->format.extradata_size =
900             rmsg->u.port_info_get_reply.format.extradata_size;
901         memcpy(port->format.extradata,
902                rmsg->u.port_info_get_reply.extradata,
903                port->format.extradata_size);
904
905         pr_debug("received port info\n");
906         dump_port_info(port);
907
908 release_msg:
909
910         pr_debug("%s:result:%d component:0x%x port:%d\n",
911                  __func__, ret, port->component->handle, port->handle);
912
913         vchi_held_msg_release(&rmsg_handle);
914
915         return ret;
916 }
917
918 /* create component on vc */
919 static int create_component(struct vchiq_mmal_instance *instance,
920                             struct vchiq_mmal_component *component,
921                             const char *name)
922 {
923         int ret;
924         struct mmal_msg m;
925         struct mmal_msg *rmsg;
926         struct vchi_held_msg rmsg_handle;
927
928         /* build component create message */
929         m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
930         m.u.component_create.client_component = (u32)(unsigned long)component;
931         strncpy(m.u.component_create.name, name,
932                 sizeof(m.u.component_create.name));
933
934         ret = send_synchronous_mmal_msg(instance, &m,
935                                         sizeof(m.u.component_create),
936                                         &rmsg, &rmsg_handle);
937         if (ret)
938                 return ret;
939
940         if (rmsg->h.type != m.h.type) {
941                 /* got an unexpected message type in reply */
942                 ret = -EINVAL;
943                 goto release_msg;
944         }
945
946         ret = -rmsg->u.component_create_reply.status;
947         if (ret != MMAL_MSG_STATUS_SUCCESS)
948                 goto release_msg;
949
950         /* a valid component response received */
951         component->handle = rmsg->u.component_create_reply.component_handle;
952         component->inputs = rmsg->u.component_create_reply.input_num;
953         component->outputs = rmsg->u.component_create_reply.output_num;
954         component->clocks = rmsg->u.component_create_reply.clock_num;
955
956         pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
957                  component->handle,
958                  component->inputs, component->outputs, component->clocks);
959
960 release_msg:
961         vchi_held_msg_release(&rmsg_handle);
962
963         return ret;
964 }
965
966 /* destroys a component on vc */
967 static int destroy_component(struct vchiq_mmal_instance *instance,
968                              struct vchiq_mmal_component *component)
969 {
970         int ret;
971         struct mmal_msg m;
972         struct mmal_msg *rmsg;
973         struct vchi_held_msg rmsg_handle;
974
975         m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
976         m.u.component_destroy.component_handle = component->handle;
977
978         ret = send_synchronous_mmal_msg(instance, &m,
979                                         sizeof(m.u.component_destroy),
980                                         &rmsg, &rmsg_handle);
981         if (ret)
982                 return ret;
983
984         if (rmsg->h.type != m.h.type) {
985                 /* got an unexpected message type in reply */
986                 ret = -EINVAL;
987                 goto release_msg;
988         }
989
990         ret = -rmsg->u.component_destroy_reply.status;
991
992 release_msg:
993
994         vchi_held_msg_release(&rmsg_handle);
995
996         return ret;
997 }
998
999 /* enable a component on vc */
1000 static int enable_component(struct vchiq_mmal_instance *instance,
1001                             struct vchiq_mmal_component *component)
1002 {
1003         int ret;
1004         struct mmal_msg m;
1005         struct mmal_msg *rmsg;
1006         struct vchi_held_msg rmsg_handle;
1007
1008         m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1009         m.u.component_enable.component_handle = component->handle;
1010
1011         ret = send_synchronous_mmal_msg(instance, &m,
1012                                         sizeof(m.u.component_enable),
1013                                         &rmsg, &rmsg_handle);
1014         if (ret)
1015                 return ret;
1016
1017         if (rmsg->h.type != m.h.type) {
1018                 /* got an unexpected message type in reply */
1019                 ret = -EINVAL;
1020                 goto release_msg;
1021         }
1022
1023         ret = -rmsg->u.component_enable_reply.status;
1024
1025 release_msg:
1026         vchi_held_msg_release(&rmsg_handle);
1027
1028         return ret;
1029 }
1030
1031 /* disable a component on vc */
1032 static int disable_component(struct vchiq_mmal_instance *instance,
1033                              struct vchiq_mmal_component *component)
1034 {
1035         int ret;
1036         struct mmal_msg m;
1037         struct mmal_msg *rmsg;
1038         struct vchi_held_msg rmsg_handle;
1039
1040         m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1041         m.u.component_disable.component_handle = component->handle;
1042
1043         ret = send_synchronous_mmal_msg(instance, &m,
1044                                         sizeof(m.u.component_disable),
1045                                         &rmsg, &rmsg_handle);
1046         if (ret)
1047                 return ret;
1048
1049         if (rmsg->h.type != m.h.type) {
1050                 /* got an unexpected message type in reply */
1051                 ret = -EINVAL;
1052                 goto release_msg;
1053         }
1054
1055         ret = -rmsg->u.component_disable_reply.status;
1056
1057 release_msg:
1058
1059         vchi_held_msg_release(&rmsg_handle);
1060
1061         return ret;
1062 }
1063
1064 /* get version of mmal implementation */
1065 static int get_version(struct vchiq_mmal_instance *instance,
1066                        u32 *major_out, u32 *minor_out)
1067 {
1068         int ret;
1069         struct mmal_msg m;
1070         struct mmal_msg *rmsg;
1071         struct vchi_held_msg rmsg_handle;
1072
1073         m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1074
1075         ret = send_synchronous_mmal_msg(instance, &m,
1076                                         sizeof(m.u.version),
1077                                         &rmsg, &rmsg_handle);
1078         if (ret)
1079                 return ret;
1080
1081         if (rmsg->h.type != m.h.type) {
1082                 /* got an unexpected message type in reply */
1083                 ret = -EINVAL;
1084                 goto release_msg;
1085         }
1086
1087         *major_out = rmsg->u.version.major;
1088         *minor_out = rmsg->u.version.minor;
1089
1090 release_msg:
1091         vchi_held_msg_release(&rmsg_handle);
1092
1093         return ret;
1094 }
1095
1096 /* do a port action with a port as a parameter */
1097 static int port_action_port(struct vchiq_mmal_instance *instance,
1098                             struct vchiq_mmal_port *port,
1099                             enum mmal_msg_port_action_type action_type)
1100 {
1101         int ret;
1102         struct mmal_msg m;
1103         struct mmal_msg *rmsg;
1104         struct vchi_held_msg rmsg_handle;
1105
1106         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1107         m.u.port_action_port.component_handle = port->component->handle;
1108         m.u.port_action_port.port_handle = port->handle;
1109         m.u.port_action_port.action = action_type;
1110
1111         port_to_mmal_msg(port, &m.u.port_action_port.port);
1112
1113         ret = send_synchronous_mmal_msg(instance, &m,
1114                                         sizeof(m.u.port_action_port),
1115                                         &rmsg, &rmsg_handle);
1116         if (ret)
1117                 return ret;
1118
1119         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1120                 /* got an unexpected message type in reply */
1121                 ret = -EINVAL;
1122                 goto release_msg;
1123         }
1124
1125         ret = -rmsg->u.port_action_reply.status;
1126
1127         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1128                  __func__,
1129                  ret, port->component->handle, port->handle,
1130                  port_action_type_names[action_type], action_type);
1131
1132 release_msg:
1133         vchi_held_msg_release(&rmsg_handle);
1134
1135         return ret;
1136 }
1137
1138 /* do a port action with handles as parameters */
1139 static int port_action_handle(struct vchiq_mmal_instance *instance,
1140                               struct vchiq_mmal_port *port,
1141                               enum mmal_msg_port_action_type action_type,
1142                               u32 connect_component_handle,
1143                               u32 connect_port_handle)
1144 {
1145         int ret;
1146         struct mmal_msg m;
1147         struct mmal_msg *rmsg;
1148         struct vchi_held_msg rmsg_handle;
1149
1150         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1151
1152         m.u.port_action_handle.component_handle = port->component->handle;
1153         m.u.port_action_handle.port_handle = port->handle;
1154         m.u.port_action_handle.action = action_type;
1155
1156         m.u.port_action_handle.connect_component_handle =
1157             connect_component_handle;
1158         m.u.port_action_handle.connect_port_handle = connect_port_handle;
1159
1160         ret = send_synchronous_mmal_msg(instance, &m,
1161                                         sizeof(m.u.port_action_handle),
1162                                         &rmsg, &rmsg_handle);
1163         if (ret)
1164                 return ret;
1165
1166         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1167                 /* got an unexpected message type in reply */
1168                 ret = -EINVAL;
1169                 goto release_msg;
1170         }
1171
1172         ret = -rmsg->u.port_action_reply.status;
1173
1174         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1175                  __func__,
1176                  ret, port->component->handle, port->handle,
1177                  port_action_type_names[action_type],
1178                  action_type, connect_component_handle, connect_port_handle);
1179
1180 release_msg:
1181         vchi_held_msg_release(&rmsg_handle);
1182
1183         return ret;
1184 }
1185
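/* Set a parameter on a port.
 *
 * The parameter value is copied inline into the message; the size field
 * covers the two-u32 parameter header (id and size) plus the value itself.
 */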
1186 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1187                               struct vchiq_mmal_port *port,
1188                               u32 parameter_id, void *value, u32 value_size)
1189 {
1190         int ret;
1191         struct mmal_msg m;
1192         struct mmal_msg *rmsg;
1193         struct vchi_held_msg rmsg_handle;
1194
1195         m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1196
1197         m.u.port_parameter_set.component_handle = port->component->handle;
1198         m.u.port_parameter_set.port_handle = port->handle;
1199         m.u.port_parameter_set.id = parameter_id;
1200         m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1201         memcpy(&m.u.port_parameter_set.value, value, value_size);
1202
1203         ret = send_synchronous_mmal_msg(instance, &m,
1204                                         (4 * sizeof(u32)) + value_size,
1205                                         &rmsg, &rmsg_handle);
1206         if (ret)
1207                 return ret;
1208
1209         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1210                 /* got an unexpected message type in reply */
1211                 ret = -EINVAL;
1212                 goto release_msg;
1213         }
1214
1215         ret = -rmsg->u.port_parameter_set_reply.status;
1216
1217         pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1218                  __func__,
1219                  ret, port->component->handle, port->handle, parameter_id);
1220
1221 release_msg:
1222         vchi_held_msg_release(&rmsg_handle);
1223
1224         return ret;
1225 }
1226
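/* Get a parameter from a port.
 *
 * If the reply is larger than the supplied buffer, only *value_size bytes
 * are copied out and *value_size is updated to report the true parameter
 * size.
 */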
1227 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1228                               struct vchiq_mmal_port *port,
1229                               u32 parameter_id, void *value, u32 *value_size)
1230 {
1231         int ret;
1232         struct mmal_msg m;
1233         struct mmal_msg *rmsg;
1234         struct vchi_held_msg rmsg_handle;
1235
1236         m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1237
1238         m.u.port_parameter_get.component_handle = port->component->handle;
1239         m.u.port_parameter_get.port_handle = port->handle;
1240         m.u.port_parameter_get.id = parameter_id;
1241         m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1242
1243         ret = send_synchronous_mmal_msg(instance, &m,
1244                                         sizeof(struct
1245                                                mmal_msg_port_parameter_get),
1246                                         &rmsg, &rmsg_handle);
1247         if (ret)
1248                 return ret;
1249
1250         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1251                 /* got an unexpected message type in reply */
1252                 pr_err("Incorrect reply type %d\n", rmsg->h.type);
1253                 ret = -EINVAL;
1254                 goto release_msg;
1255         }
1256
1257         ret = -rmsg->u.port_parameter_get_reply.status;
1258         /* port_parameter_get_reply.size includes the header,
1259          * whilst *value_size doesn't.
1260          */
1261         rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
1262
1263         if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1264                 /* Copy only as much as we have space for
1265                  * but report true size of parameter
1266                  */
1267                 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1268                        *value_size);
1269                 *value_size = rmsg->u.port_parameter_get_reply.size;
1270         } else {
1271                 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1272                        rmsg->u.port_parameter_get_reply.size);
1273         }
1274
1275         pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1276                  ret, port->component->handle, port->handle, parameter_id);
1277
1278 release_msg:
1279         vchi_held_msg_release(&rmsg_handle);
1280
1281         return ret;
1282 }
1283
1284 /* disables a port and drains buffers from it */
1285 static int port_disable(struct vchiq_mmal_instance *instance,
1286                         struct vchiq_mmal_port *port)
1287 {
1288         int ret;
1289         struct list_head *q, *buf_head;
1290         unsigned long flags = 0;
1291
1292         if (!port->enabled)
1293                 return 0;
1294
1295         port->enabled = 0;
1296
1297         ret = port_action_port(instance, port,
1298                                MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1299         if (ret == 0) {
1300                 /*
1301                  * Drain all queued buffers on port. This should only
1302                  * apply to buffers that have been queued before the port
1303                  * has been enabled. If the port has been enabled and buffers
1304                  * passed, then the buffers should have been removed from this
1305                  * list, and we should get the relevant callbacks via VCHIQ
1306                  * to release the buffers.
1307                  */
1308                 spin_lock_irqsave(&port->slock, flags);
1309
1310                 list_for_each_safe(buf_head, q, &port->buffers) {
1311                         struct mmal_buffer *mmalbuf;
1312
1313                         mmalbuf = list_entry(buf_head, struct mmal_buffer,
1314                                              list);
1315                         list_del(buf_head);
1316                         if (port->buffer_cb)
1317                                 port->buffer_cb(instance,
1318                                                 port, 0, mmalbuf, 0, 0,
1319                                                 MMAL_TIME_UNKNOWN,
1320                                                 MMAL_TIME_UNKNOWN);
1321                 }
1322
1323                 spin_unlock_irqrestore(&port->slock, flags);
1324
1325                 ret = port_info_get(instance, port);
1326         }
1327
1328         return ret;
1329 }
1330
1331 /* enable a port */
1332 static int port_enable(struct vchiq_mmal_instance *instance,
1333                        struct vchiq_mmal_port *port)
1334 {
1335         unsigned int hdr_count;
1336         struct list_head *q, *buf_head;
1337         int ret;
1338
1339         if (port->enabled)
1340                 return 0;
1341
1342         ret = port_action_port(instance, port,
1343                                MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1344         if (ret)
1345                 goto done;
1346
1347         port->enabled = 1;
1348
1349         if (port->buffer_cb) {
1350                 /* send buffer headers to videocore */
1351                 hdr_count = 1;
1352                 list_for_each_safe(buf_head, q, &port->buffers) {
1353                         struct mmal_buffer *mmalbuf;
1354
1355                         mmalbuf = list_entry(buf_head, struct mmal_buffer,
1356                                              list);
1357                         ret = buffer_from_host(instance, port, mmalbuf);
1358                         if (ret)
1359                                 goto done;
1360
1361                         list_del(buf_head);
1362                         hdr_count++;
1363                         if (hdr_count > port->current_buffer.num)
1364                                 break;
1365                 }
1366         }
1367
1368         ret = port_info_get(instance, port);
1369
1370 done:
1371         return ret;
1372 }
1373
1374 /* ------------------------------------------------------------------
1375  * Exported API
1376  *------------------------------------------------------------------
1377  */
1378
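/* commit the locally configured port format to the VPU, then read back the
 * values that were actually applied
 */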
1379 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1380                                struct vchiq_mmal_port *port)
1381 {
1382         int ret;
1383
1384         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1385                 return -EINTR;
1386
1387         ret = port_info_set(instance, port);
1388         if (ret)
1389                 goto release_unlock;
1390
1391         /* read what has actually been set */
1392         ret = port_info_get(instance, port);
1393
1394 release_unlock:
1395         mutex_unlock(&instance->vchiq_mutex);
1396
1397         return ret;
1398 }
1399
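/* set a port parameter, serialised against other users of the vchiq service */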
1400 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1401                                   struct vchiq_mmal_port *port,
1402                                   u32 parameter, void *value, u32 value_size)
1403 {
1404         int ret;
1405
1406         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1407                 return -EINTR;
1408
1409         ret = port_parameter_set(instance, port, parameter, value, value_size);
1410
1411         mutex_unlock(&instance->vchiq_mutex);
1412
1413         return ret;
1414 }
1415
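/* get a port parameter, serialised against other users of the vchiq service */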
1416 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1417                                   struct vchiq_mmal_port *port,
1418                                   u32 parameter, void *value, u32 *value_size)
1419 {
1420         int ret;
1421
1422         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1423                 return -EINTR;
1424
1425         ret = port_parameter_get(instance, port, parameter, value, value_size);
1426
1427         mutex_unlock(&instance->vchiq_mutex);
1428
1429         return ret;
1430 }
1431
1432 /* enable a port
1433  *
1434  * Enables a port and, if a buffer callback handler has been provided,
1435  * queues the port's buffers to videocore to satisfy future callbacks.
1436  */
1437 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1438                            struct vchiq_mmal_port *port,
1439                            vchiq_mmal_buffer_cb buffer_cb)
1440 {
1441         int ret;
1442
1443         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1444                 return -EINTR;
1445
1446         /* already enabled - noop */
1447         if (port->enabled) {
1448                 ret = 0;
1449                 goto unlock;
1450         }
1451
1452         port->buffer_cb = buffer_cb;
1453
1454         ret = port_enable(instance, port);
1455
1456 unlock:
1457         mutex_unlock(&instance->vchiq_mutex);
1458
1459         return ret;
1460 }
1461
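     /* Disable a port. Any buffers remaining on the port's queue are handed
      * back to the client through the buffer callback.
      */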
1462 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1463                             struct vchiq_mmal_port *port)
1464 {
1465         int ret;
1466
1467         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1468                 return -EINTR;
1469
1470         if (!port->enabled) {
1471                 mutex_unlock(&instance->vchiq_mutex);
1472                 return 0;
1473         }
1474
1475         ret = port_disable(instance, port);
1476
1477         mutex_unlock(&instance->vchiq_mutex);
1478
1479         return ret;
1480 }
1481
1482 /* Ports will be connected in a tunneled manner so data buffers
1483  * are not handled by the client.
1484  */
1485 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1486                                    struct vchiq_mmal_port *src,
1487                                    struct vchiq_mmal_port *dst)
1488 {
1489         int ret;
1490
1491         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1492                 return -EINTR;
1493
1494         /* disconnect ports if connected */
1495         if (src->connected) {
1496                 ret = port_disable(instance, src);
1497                 if (ret) {
1498                         pr_err("failed disabling src port(%d)\n", ret);
1499                         goto release_unlock;
1500                 }
1501
1502                 /* the destination port does not need to be disabled;
1503                  * as the ports are connected this is done automatically
1504                  */
1505
1506                 ret = port_action_handle(instance, src,
1507                                          MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1508                                          src->connected->component->handle,
1509                                          src->connected->handle);
1510                 if (ret < 0) {
1511                         pr_err("failed disconnecting src port\n");
1512                         goto release_unlock;
1513                 }
1514                 src->connected->enabled = 0;
1515                 src->connected = NULL;
1516         }
1517
1518         if (!dst) {
1519                 /* do not make new connection */
1520                 ret = 0;
1521                 pr_debug("not making new connection\n");
1522                 goto release_unlock;
1523         }
1524
1525         /* copy src port format to dst */
1526         dst->format.encoding = src->format.encoding;
1527         dst->es.video.width = src->es.video.width;
1528         dst->es.video.height = src->es.video.height;
1529         dst->es.video.crop.x = src->es.video.crop.x;
1530         dst->es.video.crop.y = src->es.video.crop.y;
1531         dst->es.video.crop.width = src->es.video.crop.width;
1532         dst->es.video.crop.height = src->es.video.crop.height;
1533         dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1534         dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1535
1536         /* set new format */
1537         ret = port_info_set(instance, dst);
1538         if (ret) {
1539                 pr_debug("setting port info failed\n");
1540                 goto release_unlock;
1541         }
1542
1543         /* read what has actually been set */
1544         ret = port_info_get(instance, dst);
1545         if (ret) {
1546                 pr_debug("read back port info failed\n");
1547                 goto release_unlock;
1548         }
1549
1550         /* connect two ports together */
1551         ret = port_action_handle(instance, src,
1552                                  MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1553                                  dst->component->handle, dst->handle);
1554         if (ret < 0) {
1555                 pr_debug("connecting port %d:%d to %d:%d failed\n",
1556                          src->component->handle, src->handle,
1557                          dst->component->handle, dst->handle);
1558                 goto release_unlock;
1559         }
1560         src->connected = dst;
1561
1562 release_unlock:
1563
1564         mutex_unlock(&instance->vchiq_mutex);
1565
1566         return ret;
1567 }
1568
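     /* Submit a buffer to a port. If the port is not currently enabled the
      * buffer is queued on the port and sent to the VPU when the port is
      * enabled (see port_enable()).
      */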
1569 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1570                              struct vchiq_mmal_port *port,
1571                              struct mmal_buffer *buffer)
1572 {
1573         unsigned long flags = 0;
1574         int ret;
1575
1576         ret = buffer_from_host(instance, port, buffer);
1577         if (ret == -EINVAL) {
1578                 /* Port is disabled. Queue for when it is enabled. */
1579                 spin_lock_irqsave(&port->slock, flags);
1580                 list_add_tail(&buffer->list, &port->buffers);
1581                 spin_unlock_irqrestore(&port->slock, flags);
1582         }
1583
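             /* note: errors other than -EINVAL from buffer_from_host() are
              * not reported back to the caller
              */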
1584         return 0;
1585 }
1586
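     /* Allocate a message context for a buffer and attach it ready for use. */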
1587 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1588                           struct mmal_buffer *buf)
1589 {
1590         struct mmal_msg_context *msg_context = get_msg_context(instance);
1591
1592         if (IS_ERR(msg_context))
1593                 return PTR_ERR(msg_context);
1594
1595         buf->msg_context = msg_context;
1596         return 0;
1597 }
1598
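     /* Release the message context attached to a buffer by
      * mmal_vchi_buffer_init().
      */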
1599 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1600 {
1601         struct mmal_msg_context *msg_context = buf->msg_context;
1602
1603         if (msg_context)
1604                 release_msg_context(msg_context);
1605         buf->msg_context = NULL;
1606
1607         return 0;
1608 }
1609
1610 /* Initialise a mmal component and gather the information on its
1611  * control, input, output and clock ports.
1612  */
1613 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1614                               const char *name,
1615                               struct vchiq_mmal_component **component_out)
1616 {
1617         int ret;
1618         int idx;                /* port index */
1619         struct vchiq_mmal_component *component;
1620
1621         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1622                 return -EINTR;
1623
1624         if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
1625                 ret = -EINVAL;  /* todo is this correct error? */
1626                 goto unlock;
1627         }
1628
1629         component = &instance->component[instance->component_idx];
1630
1631         ret = create_component(instance, component, name);
1632         if (ret < 0) {
1633                 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1634                        __func__, ret);
1635                 goto unlock;
1636         }
1637
1638         /* ports info needs gathering */
1639         component->control.type = MMAL_PORT_TYPE_CONTROL;
1640         component->control.index = 0;
1641         component->control.component = component;
1642         spin_lock_init(&component->control.slock);
1643         INIT_LIST_HEAD(&component->control.buffers);
1644         ret = port_info_get(instance, &component->control);
1645         if (ret < 0)
1646                 goto release_component;
1647
1648         for (idx = 0; idx < component->inputs; idx++) {
1649                 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1650                 component->input[idx].index = idx;
1651                 component->input[idx].component = component;
1652                 spin_lock_init(&component->input[idx].slock);
1653                 INIT_LIST_HEAD(&component->input[idx].buffers);
1654                 ret = port_info_get(instance, &component->input[idx]);
1655                 if (ret < 0)
1656                         goto release_component;
1657         }
1658
1659         for (idx = 0; idx < component->outputs; idx++) {
1660                 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1661                 component->output[idx].index = idx;
1662                 component->output[idx].component = component;
1663                 spin_lock_init(&component->output[idx].slock);
1664                 INIT_LIST_HEAD(&component->output[idx].buffers);
1665                 ret = port_info_get(instance, &component->output[idx]);
1666                 if (ret < 0)
1667                         goto release_component;
1668         }
1669
1670         for (idx = 0; idx < component->clocks; idx++) {
1671                 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1672                 component->clock[idx].index = idx;
1673                 component->clock[idx].component = component;
1674                 spin_lock_init(&component->clock[idx].slock);
1675                 INIT_LIST_HEAD(&component->clock[idx].buffers);
1676                 ret = port_info_get(instance, &component->clock[idx]);
1677                 if (ret < 0)
1678                         goto release_component;
1679         }
1680
1681         instance->component_idx++;
1682
1683         *component_out = component;
1684
1685         mutex_unlock(&instance->vchiq_mutex);
1686
1687         return 0;
1688
1689 release_component:
1690         destroy_component(instance, component);
1691 unlock:
1692         mutex_unlock(&instance->vchiq_mutex);
1693
1694         return ret;
1695 }
1696
1697 /*
1698  * cause a mmal component to be destroyed
1699  */
1700 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1701                                   struct vchiq_mmal_component *component)
1702 {
1703         int ret;
1704
1705         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1706                 return -EINTR;
1707
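             /* disable the component first if it is still enabled; note that
              * any error from disable_component() is superseded by the result
              * of destroy_component() below
              */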
1708         if (component->enabled)
1709                 ret = disable_component(instance, component);
1710
1711         ret = destroy_component(instance, component);
1712
1713         mutex_unlock(&instance->vchiq_mutex);
1714
1715         return ret;
1716 }
1717
1718 /*
1719  * cause a mmal component to be enabled
1720  */
1721 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1722                                 struct vchiq_mmal_component *component)
1723 {
1724         int ret;
1725
1726         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1727                 return -EINTR;
1728
1729         if (component->enabled) {
1730                 mutex_unlock(&instance->vchiq_mutex);
1731                 return 0;
1732         }
1733
1734         ret = enable_component(instance, component);
1735         if (ret == 0)
1736                 component->enabled = true;
1737
1738         mutex_unlock(&instance->vchiq_mutex);
1739
1740         return ret;
1741 }
1742
1743 /*
1744  * cause a mmal component to be disabled
1745  */
1746 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1747                                  struct vchiq_mmal_component *component)
1748 {
1749         int ret;
1750
1751         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1752                 return -EINTR;
1753
1754         if (!component->enabled) {
1755                 mutex_unlock(&instance->vchiq_mutex);
1756                 return 0;
1757         }
1758
1759         ret = disable_component(instance, component);
1760         if (ret == 0)
1761                 component->enabled = 0;
1762
1763         mutex_unlock(&instance->vchiq_mutex);
1764
1765         return ret;
1766 }
1767
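     /* Query the version of the MMAL service running on the VPU. */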
1768 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1769                        u32 *major_out, u32 *minor_out)
1770 {
1771         int ret;
1772
1773         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1774                 return -EINTR;
1775
1776         ret = get_version(instance, major_out, minor_out);
1777
1778         mutex_unlock(&instance->vchiq_mutex);
1779
1780         return ret;
1781 }
1782
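     /* Tear down the MMAL VCHI service connection and free all resources held
      * by the instance.
      */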
1783 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1784 {
1785         int status = 0;
1786
1787         if (!instance)
1788                 return -EINVAL;
1789
1790         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1791                 return -EINTR;
1792
1793         vchi_service_use(instance->handle);
1794
1795         status = vchi_service_close(instance->handle);
1796         if (status != 0)
1797                 pr_err("mmal-vchiq: VCHIQ close failed\n");
1798
1799         mutex_unlock(&instance->vchiq_mutex);
1800
1801         flush_workqueue(instance->bulk_wq);
1802         destroy_workqueue(instance->bulk_wq);
1803
1804         vfree(instance->bulk_scratch);
1805
1806         idr_destroy(&instance->context_map);
1807
1808         kfree(instance);
1809
1810         return status;
1811 }
1812
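     /* Open the VCHI connection to the VideoCore MMAL service and allocate
      * the host side instance state.
      *
      * Illustrative call sequence for a client (a rough sketch only; error
      * handling is omitted and the component name is just an example):
      *
      *      struct vchiq_mmal_instance *inst;
      *      struct vchiq_mmal_component *cam;
      *
      *      vchiq_mmal_init(&inst);
      *      vchiq_mmal_component_init(inst, "ril.camera", &cam);
      *      vchiq_mmal_component_enable(inst, cam);
      *      ...
      *      vchiq_mmal_component_disable(inst, cam);
      *      vchiq_mmal_component_finalise(inst, cam);
      *      vchiq_mmal_finalise(inst);
      */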
1813 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1814 {
1815         int status;
1816         struct vchiq_mmal_instance *instance;
1817         static VCHI_INSTANCE_T vchi_instance;
1818         struct service_creation params = {
1819                 .version                = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1820                 .service_id             = VC_MMAL_SERVER_NAME,
1821                 .callback               = service_callback,
1822                 .callback_param         = NULL,
1823         };
1824
1825         /* compile time checks to ensure the structure sizes are correct,
1826          * as they are directly (de)serialised from memory.
1827          */
1828
1829         /* ensure the header structure has packed to the correct size */
1830         BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1831
1832         /* ensure message structure does not exceed maximum length */
1833         BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1834
1835         /* mmal port struct is correct size */
1836         BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1837
1838         /* create a vchi instance */
1839         status = vchi_initialise(&vchi_instance);
1840         if (status) {
1841                 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1842                        status);
1843                 return -EIO;
1844         }
1845
1846         status = vchi_connect(vchi_instance);
1847         if (status) {
1848                 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1849                 return -EIO;
1850         }
1851
1852         instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1853
1854         if (!instance)
1855                 return -ENOMEM;
1856
1857         mutex_init(&instance->vchiq_mutex);
1858
1859         instance->bulk_scratch = vmalloc(PAGE_SIZE);
1860
1861         mutex_init(&instance->context_map_lock);
1862         idr_init_base(&instance->context_map, 1);
1863
1864         params.callback_param = instance;
1865
1866         instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1867                                                     WQ_MEM_RECLAIM);
1868         if (!instance->bulk_wq)
1869                 goto err_free;
1870
1871         status = vchi_service_open(vchi_instance, &params, &instance->handle);
1872         if (status) {
1873                 pr_err("Failed to open VCHI service connection (status=%d)\n",
1874                        status);
1875                 goto err_close_services;
1876         }
1877
1878         vchi_service_release(instance->handle);
1879
1880         *out_instance = instance;
1881
1882         return 0;
1883
1884 err_close_services:
1885         vchi_service_close(instance->handle);
1886         destroy_workqueue(instance->bulk_wq);
1887 err_free:
1888         vfree(instance->bulk_scratch);
1889         kfree(instance);
1890         return -ENODEV;
1891 }