// SPDX-License-Identifier: GPL-2.0
/*
 * Broadcom BM2835 V4L2 driver
 *
 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
 *
 * Authors: Vincent Sanders @ Collabora
 *	    Dave Stevenson @ Broadcom
 *		(now dave.stevenson@raspberrypi.org)
 *	    Simon Mellor @ Broadcom
 *	    Luke Diamand @ Broadcom
 *
 * V4L2 driver MMAL vchiq interface code
 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/mutex.h>
22 #include <linux/slab.h>
23 #include <linux/completion.h>
24 #include <linux/vmalloc.h>
25 #include <media/videobuf2-vmalloc.h>
27 #include "mmal-common.h"
28 #include "mmal-vchiq.h"
32 #include "interface/vchi/vchi.h"
34 /* maximum number of components supported */
35 #define VCHIQ_MMAL_MAX_COMPONENTS 4
37 /*#define FULL_MSG_DUMP 1*/
40 static const char *const msg_type_names[] = {
58 "GET_CORE_STATS_FOR_PORT",
62 "OPAQUE_ALLOCATOR_DESC",
65 "BUFFER_FROM_HOST_ZEROLEN",
71 static const char *const port_action_type_names[] = {
/* Debug helper: dump an MMAL message header (and optionally the full
 * payload as a hex dump when FULL_MSG_DUMP is defined).  Compiles away
 * entirely in non-debug builds.
 */
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#elif defined(DEBUG)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
108 struct vchiq_mmal_instance;
110 /* normal message context */
111 struct mmal_msg_context {
112 struct vchiq_mmal_instance *instance;
114 /* Index in the context_map idr so that we can find the
115 * mmal_msg_context again when servicing the VCHI reply.
121 /* work struct for buffer_cb callback */
122 struct work_struct work;
123 /* work struct for deferred callback */
124 struct work_struct buffer_to_host_work;
126 struct vchiq_mmal_instance *instance;
128 struct vchiq_mmal_port *port;
129 /* actual buffer used to store bulk reply */
130 struct mmal_buffer *buffer;
131 /* amount of buffer used */
132 unsigned long buffer_used;
133 /* MMAL buffer flags */
135 /* Presentation and Decode timestamps */
139 int status; /* context status */
141 } bulk; /* bulk data */
144 /* message handle to release */
145 struct vchi_held_msg msg_handle;
146 /* pointer to received message */
147 struct mmal_msg *msg;
148 /* received message length */
150 /* completion upon reply */
151 struct completion cmplt;
152 } sync; /* synchronous response */
157 struct vchiq_mmal_instance {
158 VCHI_SERVICE_HANDLE_T handle;
160 /* ensure serialised access to service */
161 struct mutex vchiq_mutex;
163 /* vmalloc page to receive scratch bulk xfers into */
166 struct idr context_map;
167 /* protect accesses to context_map */
168 struct mutex context_map_lock;
170 /* component to use next */
172 struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
174 /* ordered workqueue to process all bulk operations */
175 struct workqueue_struct *bulk_wq;
178 static struct mmal_msg_context *
179 get_msg_context(struct vchiq_mmal_instance *instance)
181 struct mmal_msg_context *msg_context;
184 /* todo: should this be allocated from a pool to avoid kzalloc */
185 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
188 return ERR_PTR(-ENOMEM);
190 /* Create an ID that will be passed along with our message so
191 * that when we service the VCHI reply, we can look up what
192 * message is being replied to.
194 mutex_lock(&instance->context_map_lock);
195 handle = idr_alloc(&instance->context_map, msg_context,
197 mutex_unlock(&instance->context_map_lock);
201 return ERR_PTR(handle);
204 msg_context->instance = instance;
205 msg_context->handle = handle;
210 static struct mmal_msg_context *
211 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
213 return idr_find(&instance->context_map, handle);
217 release_msg_context(struct mmal_msg_context *msg_context)
219 struct vchiq_mmal_instance *instance = msg_context->instance;
221 mutex_lock(&instance->context_map_lock);
222 idr_remove(&instance->context_map, msg_context->handle);
223 mutex_unlock(&instance->context_map_lock);
227 /* deals with receipt of event to host message */
228 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
229 struct mmal_msg *msg, u32 msg_len)
231 pr_debug("unhandled event\n");
232 pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
233 msg->u.event_to_host.client_component,
234 msg->u.event_to_host.port_type,
235 msg->u.event_to_host.port_num,
236 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
239 /* workqueue scheduled callback
241 * we do this because it is important we do not call any other vchiq
242 * sync calls from witin the message delivery thread
244 static void buffer_work_cb(struct work_struct *work)
246 struct mmal_msg_context *msg_context =
247 container_of(work, struct mmal_msg_context, u.bulk.work);
249 atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
251 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
252 msg_context->u.bulk.port,
253 msg_context->u.bulk.status,
254 msg_context->u.bulk.buffer,
255 msg_context->u.bulk.buffer_used,
256 msg_context->u.bulk.mmal_flags,
257 msg_context->u.bulk.dts,
258 msg_context->u.bulk.pts);
261 /* workqueue scheduled callback to handle receiving buffers
263 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
264 * If we block in the service_callback context then we can't process the
265 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
266 * vchi_bulk_queue_receive() call to complete.
268 static void buffer_to_host_work_cb(struct work_struct *work)
270 struct mmal_msg_context *msg_context =
271 container_of(work, struct mmal_msg_context,
272 u.bulk.buffer_to_host_work);
273 struct vchiq_mmal_instance *instance = msg_context->instance;
274 unsigned long len = msg_context->u.bulk.buffer_used;
278 /* Dummy receive to ensure the buffers remain in order */
280 /* queue the bulk submission */
281 vchi_service_use(instance->handle);
282 ret = vchi_bulk_queue_receive(instance->handle,
283 msg_context->u.bulk.buffer->buffer,
284 /* Actual receive needs to be a multiple
288 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
289 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
292 vchi_service_release(instance->handle);
295 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
296 __func__, msg_context, ret);
299 /* enqueue a bulk receive for a given message context */
300 static int bulk_receive(struct vchiq_mmal_instance *instance,
301 struct mmal_msg *msg,
302 struct mmal_msg_context *msg_context)
304 unsigned long rd_len;
306 rd_len = msg->u.buffer_from_host.buffer_header.length;
308 if (!msg_context->u.bulk.buffer) {
309 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
311 /* todo: this is a serious error, we should never have
312 * committed a buffer_to_host operation to the mmal
313 * port without the buffer to back it up (underflow
314 * handling) and there is no obvious way to deal with
315 * this - how is the mmal servie going to react when
316 * we fail to do the xfer and reschedule a buffer when
317 * it arrives? perhaps a starved flag to indicate a
318 * waiting bulk receive?
324 /* ensure we do not overrun the available buffer */
325 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
326 rd_len = msg_context->u.bulk.buffer->buffer_size;
327 pr_warn("short read as not enough receive buffer space\n");
328 /* todo: is this the correct response, what happens to
329 * the rest of the message data?
334 msg_context->u.bulk.buffer_used = rd_len;
335 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
336 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
338 queue_work(msg_context->instance->bulk_wq,
339 &msg_context->u.bulk.buffer_to_host_work);
344 /* data in message, memcpy from packet into output buffer */
345 static int inline_receive(struct vchiq_mmal_instance *instance,
346 struct mmal_msg *msg,
347 struct mmal_msg_context *msg_context)
349 memcpy(msg_context->u.bulk.buffer->buffer,
350 msg->u.buffer_from_host.short_data,
351 msg->u.buffer_from_host.payload_in_message);
353 msg_context->u.bulk.buffer_used =
354 msg->u.buffer_from_host.payload_in_message;
359 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
361 buffer_from_host(struct vchiq_mmal_instance *instance,
362 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
364 struct mmal_msg_context *msg_context;
371 pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
374 if (!buf->msg_context) {
375 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
379 msg_context = buf->msg_context;
381 /* store bulk message context for when data arrives */
382 msg_context->u.bulk.instance = instance;
383 msg_context->u.bulk.port = port;
384 msg_context->u.bulk.buffer = buf;
385 msg_context->u.bulk.buffer_used = 0;
387 /* initialise work structure ready to schedule callback */
388 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
389 INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
390 buffer_to_host_work_cb);
392 atomic_inc(&port->buffers_with_vpu);
394 /* prep the buffer from host message */
395 memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
397 m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
398 m.h.magic = MMAL_MAGIC;
399 m.h.context = msg_context->handle;
402 /* drvbuf is our private data passed back */
403 m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
404 m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
405 m.u.buffer_from_host.drvbuf.port_handle = port->handle;
406 m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
409 m.u.buffer_from_host.buffer_header.cmd = 0;
410 m.u.buffer_from_host.buffer_header.data =
411 (u32)(unsigned long)buf->buffer;
412 m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
413 m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
414 m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
415 m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
416 m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
417 m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
419 /* clear buffer type sepecific data */
420 memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
421 sizeof(m.u.buffer_from_host.buffer_header_type_specific));
423 /* no payload in message */
424 m.u.buffer_from_host.payload_in_message = 0;
426 vchi_service_use(instance->handle);
428 ret = vchi_queue_kernel_message(instance->handle,
430 sizeof(struct mmal_msg_header) +
431 sizeof(m.u.buffer_from_host));
433 vchi_service_release(instance->handle);
438 /* deals with receipt of buffer to host message */
439 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
440 struct mmal_msg *msg, u32 msg_len)
442 struct mmal_msg_context *msg_context;
445 pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
446 __func__, instance, msg, msg_len);
448 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
449 handle = msg->u.buffer_from_host.drvbuf.client_context;
450 msg_context = lookup_msg_context(instance, handle);
453 pr_err("drvbuf.client_context(%u) is invalid\n",
458 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
462 msg_context->u.bulk.mmal_flags =
463 msg->u.buffer_from_host.buffer_header.flags;
465 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
466 /* message reception had an error */
467 pr_warn("error %d in reply\n", msg->h.status);
469 msg_context->u.bulk.status = msg->h.status;
471 } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
473 if (msg->u.buffer_from_host.buffer_header.flags &
474 MMAL_BUFFER_HEADER_FLAG_EOS) {
475 msg_context->u.bulk.status =
476 bulk_receive(instance, msg, msg_context);
477 if (msg_context->u.bulk.status == 0)
478 return; /* successful bulk submission, bulk
479 * completion will trigger callback
482 /* do callback with empty buffer - not EOS though */
483 msg_context->u.bulk.status = 0;
484 msg_context->u.bulk.buffer_used = 0;
486 } else if (msg->u.buffer_from_host.payload_in_message == 0) {
487 /* data is not in message, queue a bulk receive */
488 msg_context->u.bulk.status =
489 bulk_receive(instance, msg, msg_context);
490 if (msg_context->u.bulk.status == 0)
491 return; /* successful bulk submission, bulk
492 * completion will trigger callback
495 /* failed to submit buffer, this will end badly */
496 pr_err("error %d on bulk submission\n",
497 msg_context->u.bulk.status);
499 } else if (msg->u.buffer_from_host.payload_in_message <=
500 MMAL_VC_SHORT_DATA) {
501 /* data payload within message */
502 msg_context->u.bulk.status = inline_receive(instance, msg,
505 pr_err("message with invalid short payload\n");
508 msg_context->u.bulk.status = -EINVAL;
509 msg_context->u.bulk.buffer_used =
510 msg->u.buffer_from_host.payload_in_message;
513 /* schedule the port callback */
514 schedule_work(&msg_context->u.bulk.work);
517 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
518 struct mmal_msg_context *msg_context)
520 msg_context->u.bulk.status = 0;
522 /* schedule the port callback */
523 schedule_work(&msg_context->u.bulk.work);
526 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
527 struct mmal_msg_context *msg_context)
529 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
531 msg_context->u.bulk.status = -EINTR;
533 schedule_work(&msg_context->u.bulk.work);
536 /* incoming event service callback */
537 static void service_callback(void *param,
538 const VCHI_CALLBACK_REASON_T reason,
541 struct vchiq_mmal_instance *instance = param;
544 struct mmal_msg *msg;
545 struct vchi_held_msg msg_handle;
546 struct mmal_msg_context *msg_context;
549 pr_err("Message callback passed NULL instance\n");
554 case VCHI_CALLBACK_MSG_AVAILABLE:
555 status = vchi_msg_hold(instance->handle, (void **)&msg,
556 &msg_len, VCHI_FLAGS_NONE, &msg_handle);
558 pr_err("Unable to dequeue a message (%d)\n", status);
562 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
564 /* handling is different for buffer messages */
565 switch (msg->h.type) {
566 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
567 vchi_held_msg_release(&msg_handle);
570 case MMAL_MSG_TYPE_EVENT_TO_HOST:
571 event_to_host_cb(instance, msg, msg_len);
572 vchi_held_msg_release(&msg_handle);
576 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
577 buffer_to_host_cb(instance, msg, msg_len);
578 vchi_held_msg_release(&msg_handle);
582 /* messages dependent on header context to complete */
583 if (!msg->h.context) {
584 pr_err("received message context was null!\n");
585 vchi_held_msg_release(&msg_handle);
589 msg_context = lookup_msg_context(instance,
592 pr_err("received invalid message context %u!\n",
594 vchi_held_msg_release(&msg_handle);
598 /* fill in context values */
599 msg_context->u.sync.msg_handle = msg_handle;
600 msg_context->u.sync.msg = msg;
601 msg_context->u.sync.msg_len = msg_len;
603 /* todo: should this check (completion_done()
604 * == 1) for no one waiting? or do we need a
605 * flag to tell us the completion has been
606 * interrupted so we can free the message and
607 * its context. This probably also solves the
608 * message arriving after interruption todo
612 /* complete message so caller knows it happened */
613 complete(&msg_context->u.sync.cmplt);
619 case VCHI_CALLBACK_BULK_RECEIVED:
620 bulk_receive_cb(instance, bulk_ctx);
623 case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
624 bulk_abort_cb(instance, bulk_ctx);
627 case VCHI_CALLBACK_SERVICE_CLOSED:
628 /* TODO: consider if this requires action if received when
629 * driver is not explicitly closing the service
634 pr_err("Received unhandled message reason %d\n", reason);
639 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
640 struct mmal_msg *msg,
641 unsigned int payload_len,
642 struct mmal_msg **msg_out,
643 struct vchi_held_msg *msg_handle_out)
645 struct mmal_msg_context *msg_context;
647 unsigned long timeout;
649 /* payload size must not cause message to exceed max size */
651 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
652 pr_err("payload length %d exceeds max:%d\n", payload_len,
653 (int)(MMAL_MSG_MAX_SIZE -
654 sizeof(struct mmal_msg_header)));
658 msg_context = get_msg_context(instance);
659 if (IS_ERR(msg_context))
660 return PTR_ERR(msg_context);
662 init_completion(&msg_context->u.sync.cmplt);
664 msg->h.magic = MMAL_MAGIC;
665 msg->h.context = msg_context->handle;
668 DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
671 vchi_service_use(instance->handle);
673 ret = vchi_queue_kernel_message(instance->handle,
675 sizeof(struct mmal_msg_header) +
678 vchi_service_release(instance->handle);
681 pr_err("error %d queuing message\n", ret);
682 release_msg_context(msg_context);
686 timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
689 pr_err("timed out waiting for sync completion\n");
691 /* todo: what happens if the message arrives after aborting */
692 release_msg_context(msg_context);
696 *msg_out = msg_context->u.sync.msg;
697 *msg_handle_out = msg_context->u.sync.msg_handle;
698 release_msg_context(msg_context);
703 static void dump_port_info(struct vchiq_mmal_port *port)
705 pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
707 pr_debug("buffer minimum num:%d size:%d align:%d\n",
708 port->minimum_buffer.num,
709 port->minimum_buffer.size, port->minimum_buffer.alignment);
711 pr_debug("buffer recommended num:%d size:%d align:%d\n",
712 port->recommended_buffer.num,
713 port->recommended_buffer.size,
714 port->recommended_buffer.alignment);
716 pr_debug("buffer current values num:%d size:%d align:%d\n",
717 port->current_buffer.num,
718 port->current_buffer.size, port->current_buffer.alignment);
720 pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
722 port->format.encoding, port->format.encoding_variant);
724 pr_debug(" bitrate:%d flags:0x%x\n",
725 port->format.bitrate, port->format.flags);
727 if (port->format.type == MMAL_ES_TYPE_VIDEO) {
729 ("es video format: width:%d height:%d colourspace:0x%x\n",
730 port->es.video.width, port->es.video.height,
731 port->es.video.color_space);
733 pr_debug(" : crop xywh %d,%d,%d,%d\n",
734 port->es.video.crop.x,
735 port->es.video.crop.y,
736 port->es.video.crop.width, port->es.video.crop.height);
737 pr_debug(" : framerate %d/%d aspect %d/%d\n",
738 port->es.video.frame_rate.num,
739 port->es.video.frame_rate.den,
740 port->es.video.par.num, port->es.video.par.den);
744 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
746 /* todo do readonly fields need setting at all? */
747 p->type = port->type;
748 p->index = port->index;
750 p->is_enabled = port->enabled;
751 p->buffer_num_min = port->minimum_buffer.num;
752 p->buffer_size_min = port->minimum_buffer.size;
753 p->buffer_alignment_min = port->minimum_buffer.alignment;
754 p->buffer_num_recommended = port->recommended_buffer.num;
755 p->buffer_size_recommended = port->recommended_buffer.size;
757 /* only three writable fields in a port */
758 p->buffer_num = port->current_buffer.num;
759 p->buffer_size = port->current_buffer.size;
760 p->userdata = (u32)(unsigned long)port;
763 static int port_info_set(struct vchiq_mmal_instance *instance,
764 struct vchiq_mmal_port *port)
768 struct mmal_msg *rmsg;
769 struct vchi_held_msg rmsg_handle;
771 pr_debug("setting port info port %p\n", port);
774 dump_port_info(port);
776 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
778 m.u.port_info_set.component_handle = port->component->handle;
779 m.u.port_info_set.port_type = port->type;
780 m.u.port_info_set.port_index = port->index;
782 port_to_mmal_msg(port, &m.u.port_info_set.port);
784 /* elementary stream format setup */
785 m.u.port_info_set.format.type = port->format.type;
786 m.u.port_info_set.format.encoding = port->format.encoding;
787 m.u.port_info_set.format.encoding_variant =
788 port->format.encoding_variant;
789 m.u.port_info_set.format.bitrate = port->format.bitrate;
790 m.u.port_info_set.format.flags = port->format.flags;
792 memcpy(&m.u.port_info_set.es, &port->es,
793 sizeof(union mmal_es_specific_format));
795 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
796 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
797 port->format.extradata_size);
799 ret = send_synchronous_mmal_msg(instance, &m,
800 sizeof(m.u.port_info_set),
801 &rmsg, &rmsg_handle);
805 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
806 /* got an unexpected message type in reply */
811 /* return operation status */
812 ret = -rmsg->u.port_info_get_reply.status;
814 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
815 port->component->handle, port->handle);
818 vchi_held_msg_release(&rmsg_handle);
823 /* use port info get message to retrieve port information */
824 static int port_info_get(struct vchiq_mmal_instance *instance,
825 struct vchiq_mmal_port *port)
829 struct mmal_msg *rmsg;
830 struct vchi_held_msg rmsg_handle;
833 m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
834 m.u.port_info_get.component_handle = port->component->handle;
835 m.u.port_info_get.port_type = port->type;
836 m.u.port_info_get.index = port->index;
838 ret = send_synchronous_mmal_msg(instance, &m,
839 sizeof(m.u.port_info_get),
840 &rmsg, &rmsg_handle);
844 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
845 /* got an unexpected message type in reply */
850 /* return operation status */
851 ret = -rmsg->u.port_info_get_reply.status;
852 if (ret != MMAL_MSG_STATUS_SUCCESS)
855 if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
860 /* copy the values out of the message */
861 port->handle = rmsg->u.port_info_get_reply.port_handle;
863 /* port type and index cached to use on port info set because
864 * it does not use a port handle
866 port->type = rmsg->u.port_info_get_reply.port_type;
867 port->index = rmsg->u.port_info_get_reply.port_index;
869 port->minimum_buffer.num =
870 rmsg->u.port_info_get_reply.port.buffer_num_min;
871 port->minimum_buffer.size =
872 rmsg->u.port_info_get_reply.port.buffer_size_min;
873 port->minimum_buffer.alignment =
874 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
876 port->recommended_buffer.alignment =
877 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
878 port->recommended_buffer.num =
879 rmsg->u.port_info_get_reply.port.buffer_num_recommended;
881 port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
882 port->current_buffer.size =
883 rmsg->u.port_info_get_reply.port.buffer_size;
886 port->format.type = rmsg->u.port_info_get_reply.format.type;
887 port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
888 port->format.encoding_variant =
889 rmsg->u.port_info_get_reply.format.encoding_variant;
890 port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
891 port->format.flags = rmsg->u.port_info_get_reply.format.flags;
893 /* elementary stream format */
895 &rmsg->u.port_info_get_reply.es,
896 sizeof(union mmal_es_specific_format));
897 port->format.es = &port->es;
899 port->format.extradata_size =
900 rmsg->u.port_info_get_reply.format.extradata_size;
901 memcpy(port->format.extradata,
902 rmsg->u.port_info_get_reply.extradata,
903 port->format.extradata_size);
905 pr_debug("received port info\n");
906 dump_port_info(port);
910 pr_debug("%s:result:%d component:0x%x port:%d\n",
911 __func__, ret, port->component->handle, port->handle);
913 vchi_held_msg_release(&rmsg_handle);
918 /* create comonent on vc */
919 static int create_component(struct vchiq_mmal_instance *instance,
920 struct vchiq_mmal_component *component,
925 struct mmal_msg *rmsg;
926 struct vchi_held_msg rmsg_handle;
928 /* build component create message */
929 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
930 m.u.component_create.client_component = (u32)(unsigned long)component;
931 strncpy(m.u.component_create.name, name,
932 sizeof(m.u.component_create.name));
934 ret = send_synchronous_mmal_msg(instance, &m,
935 sizeof(m.u.component_create),
936 &rmsg, &rmsg_handle);
940 if (rmsg->h.type != m.h.type) {
941 /* got an unexpected message type in reply */
946 ret = -rmsg->u.component_create_reply.status;
947 if (ret != MMAL_MSG_STATUS_SUCCESS)
950 /* a valid component response received */
951 component->handle = rmsg->u.component_create_reply.component_handle;
952 component->inputs = rmsg->u.component_create_reply.input_num;
953 component->outputs = rmsg->u.component_create_reply.output_num;
954 component->clocks = rmsg->u.component_create_reply.clock_num;
956 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
958 component->inputs, component->outputs, component->clocks);
961 vchi_held_msg_release(&rmsg_handle);
966 /* destroys a component on vc */
967 static int destroy_component(struct vchiq_mmal_instance *instance,
968 struct vchiq_mmal_component *component)
972 struct mmal_msg *rmsg;
973 struct vchi_held_msg rmsg_handle;
975 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
976 m.u.component_destroy.component_handle = component->handle;
978 ret = send_synchronous_mmal_msg(instance, &m,
979 sizeof(m.u.component_destroy),
980 &rmsg, &rmsg_handle);
984 if (rmsg->h.type != m.h.type) {
985 /* got an unexpected message type in reply */
990 ret = -rmsg->u.component_destroy_reply.status;
994 vchi_held_msg_release(&rmsg_handle);
999 /* enable a component on vc */
1000 static int enable_component(struct vchiq_mmal_instance *instance,
1001 struct vchiq_mmal_component *component)
1005 struct mmal_msg *rmsg;
1006 struct vchi_held_msg rmsg_handle;
1008 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1009 m.u.component_enable.component_handle = component->handle;
1011 ret = send_synchronous_mmal_msg(instance, &m,
1012 sizeof(m.u.component_enable),
1013 &rmsg, &rmsg_handle);
1017 if (rmsg->h.type != m.h.type) {
1018 /* got an unexpected message type in reply */
1023 ret = -rmsg->u.component_enable_reply.status;
1026 vchi_held_msg_release(&rmsg_handle);
1031 /* disable a component on vc */
1032 static int disable_component(struct vchiq_mmal_instance *instance,
1033 struct vchiq_mmal_component *component)
1037 struct mmal_msg *rmsg;
1038 struct vchi_held_msg rmsg_handle;
1040 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1041 m.u.component_disable.component_handle = component->handle;
1043 ret = send_synchronous_mmal_msg(instance, &m,
1044 sizeof(m.u.component_disable),
1045 &rmsg, &rmsg_handle);
1049 if (rmsg->h.type != m.h.type) {
1050 /* got an unexpected message type in reply */
1055 ret = -rmsg->u.component_disable_reply.status;
1059 vchi_held_msg_release(&rmsg_handle);
1064 /* get version of mmal implementation */
1065 static int get_version(struct vchiq_mmal_instance *instance,
1066 u32 *major_out, u32 *minor_out)
1070 struct mmal_msg *rmsg;
1071 struct vchi_held_msg rmsg_handle;
1073 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1075 ret = send_synchronous_mmal_msg(instance, &m,
1076 sizeof(m.u.version),
1077 &rmsg, &rmsg_handle);
1081 if (rmsg->h.type != m.h.type) {
1082 /* got an unexpected message type in reply */
1087 *major_out = rmsg->u.version.major;
1088 *minor_out = rmsg->u.version.minor;
1091 vchi_held_msg_release(&rmsg_handle);
1096 /* do a port action with a port as a parameter */
1097 static int port_action_port(struct vchiq_mmal_instance *instance,
1098 struct vchiq_mmal_port *port,
1099 enum mmal_msg_port_action_type action_type)
1103 struct mmal_msg *rmsg;
1104 struct vchi_held_msg rmsg_handle;
1106 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1107 m.u.port_action_port.component_handle = port->component->handle;
1108 m.u.port_action_port.port_handle = port->handle;
1109 m.u.port_action_port.action = action_type;
1111 port_to_mmal_msg(port, &m.u.port_action_port.port);
1113 ret = send_synchronous_mmal_msg(instance, &m,
1114 sizeof(m.u.port_action_port),
1115 &rmsg, &rmsg_handle);
1119 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1120 /* got an unexpected message type in reply */
1125 ret = -rmsg->u.port_action_reply.status;
1127 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1129 ret, port->component->handle, port->handle,
1130 port_action_type_names[action_type], action_type);
1133 vchi_held_msg_release(&rmsg_handle);
1138 /* do a port action with handles as parameters */
1139 static int port_action_handle(struct vchiq_mmal_instance *instance,
1140 struct vchiq_mmal_port *port,
1141 enum mmal_msg_port_action_type action_type,
1142 u32 connect_component_handle,
1143 u32 connect_port_handle)
1147 struct mmal_msg *rmsg;
1148 struct vchi_held_msg rmsg_handle;
1150 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1152 m.u.port_action_handle.component_handle = port->component->handle;
1153 m.u.port_action_handle.port_handle = port->handle;
1154 m.u.port_action_handle.action = action_type;
1156 m.u.port_action_handle.connect_component_handle =
1157 connect_component_handle;
1158 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1160 ret = send_synchronous_mmal_msg(instance, &m,
1161 sizeof(m.u.port_action_handle),
1162 &rmsg, &rmsg_handle);
1166 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1167 /* got an unexpected message type in reply */
1172 ret = -rmsg->u.port_action_reply.status;
1174 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1176 ret, port->component->handle, port->handle,
1177 port_action_type_names[action_type],
1178 action_type, connect_component_handle, connect_port_handle);
1181 vchi_held_msg_release(&rmsg_handle);
1186 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1187 struct vchiq_mmal_port *port,
1188 u32 parameter_id, void *value, u32 value_size)
1192 struct mmal_msg *rmsg;
1193 struct vchi_held_msg rmsg_handle;
1195 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1197 m.u.port_parameter_set.component_handle = port->component->handle;
1198 m.u.port_parameter_set.port_handle = port->handle;
1199 m.u.port_parameter_set.id = parameter_id;
1200 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1201 memcpy(&m.u.port_parameter_set.value, value, value_size);
1203 ret = send_synchronous_mmal_msg(instance, &m,
1204 (4 * sizeof(u32)) + value_size,
1205 &rmsg, &rmsg_handle);
1209 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1210 /* got an unexpected message type in reply */
1215 ret = -rmsg->u.port_parameter_set_reply.status;
1217 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1219 ret, port->component->handle, port->handle, parameter_id);
1222 vchi_held_msg_release(&rmsg_handle);
1227 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1228 struct vchiq_mmal_port *port,
1229 u32 parameter_id, void *value, u32 *value_size)
1233 struct mmal_msg *rmsg;
1234 struct vchi_held_msg rmsg_handle;
1236 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1238 m.u.port_parameter_get.component_handle = port->component->handle;
1239 m.u.port_parameter_get.port_handle = port->handle;
1240 m.u.port_parameter_get.id = parameter_id;
1241 m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1243 ret = send_synchronous_mmal_msg(instance, &m,
1245 mmal_msg_port_parameter_get),
1246 &rmsg, &rmsg_handle);
1250 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1251 /* got an unexpected message type in reply */
1252 pr_err("Incorrect reply type %d\n", rmsg->h.type);
1257 ret = -rmsg->u.port_parameter_get_reply.status;
1258 /* port_parameter_get_reply.size includes the header,
1259 * whilst *value_size doesn't.
1261 rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
1263 if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1264 /* Copy only as much as we have space for
1265 * but report true size of parameter
1267 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1269 *value_size = rmsg->u.port_parameter_get_reply.size;
1271 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1272 rmsg->u.port_parameter_get_reply.size);
1275 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1276 ret, port->component->handle, port->handle, parameter_id);
1279 vchi_held_msg_release(&rmsg_handle);
1284 /* disables a port and drains buffers from it */
1285 static int port_disable(struct vchiq_mmal_instance *instance,
1286 struct vchiq_mmal_port *port)
1289 struct list_head *q, *buf_head;
1290 unsigned long flags = 0;
1297 ret = port_action_port(instance, port,
1298 MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1301 * Drain all queued buffers on port. This should only
1302 * apply to buffers that have been queued before the port
1303 * has been enabled. If the port has been enabled and buffers
1304 * passed, then the buffers should have been removed from this
1305 * list, and we should get the relevant callbacks via VCHIQ
1306 * to release the buffers.
1308 spin_lock_irqsave(&port->slock, flags);
1310 list_for_each_safe(buf_head, q, &port->buffers) {
1311 struct mmal_buffer *mmalbuf;
1313 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1316 if (port->buffer_cb)
1317 port->buffer_cb(instance,
1318 port, 0, mmalbuf, 0, 0,
1323 spin_unlock_irqrestore(&port->slock, flags);
1325 ret = port_info_get(instance, port);
1332 static int port_enable(struct vchiq_mmal_instance *instance,
1333 struct vchiq_mmal_port *port)
1335 unsigned int hdr_count;
1336 struct list_head *q, *buf_head;
1342 ret = port_action_port(instance, port,
1343 MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1349 if (port->buffer_cb) {
1350 /* send buffer headers to videocore */
1352 list_for_each_safe(buf_head, q, &port->buffers) {
1353 struct mmal_buffer *mmalbuf;
1355 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1357 ret = buffer_from_host(instance, port, mmalbuf);
1363 if (hdr_count > port->current_buffer.num)
1368 ret = port_info_get(instance, port);
1374 /* ------------------------------------------------------------------
1376 *------------------------------------------------------------------
1379 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1380 struct vchiq_mmal_port *port)
1384 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1387 ret = port_info_set(instance, port);
1389 goto release_unlock;
1391 /* read what has actually been set */
1392 ret = port_info_get(instance, port);
1395 mutex_unlock(&instance->vchiq_mutex);
1400 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1401 struct vchiq_mmal_port *port,
1402 u32 parameter, void *value, u32 value_size)
1406 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1409 ret = port_parameter_set(instance, port, parameter, value, value_size);
1411 mutex_unlock(&instance->vchiq_mutex);
1416 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1417 struct vchiq_mmal_port *port,
1418 u32 parameter, void *value, u32 *value_size)
1422 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1425 ret = port_parameter_get(instance, port, parameter, value, value_size);
1427 mutex_unlock(&instance->vchiq_mutex);
1434 * enables a port and queues buffers for satisfying callbacks if we
1435 * provide a callback handler
1437 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1438 struct vchiq_mmal_port *port,
1439 vchiq_mmal_buffer_cb buffer_cb)
1443 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1446 /* already enabled - noop */
1447 if (port->enabled) {
1452 port->buffer_cb = buffer_cb;
1454 ret = port_enable(instance, port);
1457 mutex_unlock(&instance->vchiq_mutex);
1462 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1463 struct vchiq_mmal_port *port)
1467 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1470 if (!port->enabled) {
1471 mutex_unlock(&instance->vchiq_mutex);
1475 ret = port_disable(instance, port);
1477 mutex_unlock(&instance->vchiq_mutex);
1482 /* ports will be connected in a tunneled manner so data buffers
1483 * are not handled by client.
1485 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1486 struct vchiq_mmal_port *src,
1487 struct vchiq_mmal_port *dst)
1491 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1494 /* disconnect ports if connected */
1495 if (src->connected) {
1496 ret = port_disable(instance, src);
1498 pr_err("failed disabling src port(%d)\n", ret);
1499 goto release_unlock;
1502 /* do not need to disable the destination port as they
1503 * are connected and it is done automatically
1506 ret = port_action_handle(instance, src,
1507 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1508 src->connected->component->handle,
1509 src->connected->handle);
1511 pr_err("failed disconnecting src port\n");
1512 goto release_unlock;
1514 src->connected->enabled = 0;
1515 src->connected = NULL;
1519 /* do not make new connection */
1521 pr_debug("not making new connection\n");
1522 goto release_unlock;
1525 /* copy src port format to dst */
1526 dst->format.encoding = src->format.encoding;
1527 dst->es.video.width = src->es.video.width;
1528 dst->es.video.height = src->es.video.height;
1529 dst->es.video.crop.x = src->es.video.crop.x;
1530 dst->es.video.crop.y = src->es.video.crop.y;
1531 dst->es.video.crop.width = src->es.video.crop.width;
1532 dst->es.video.crop.height = src->es.video.crop.height;
1533 dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1534 dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1536 /* set new format */
1537 ret = port_info_set(instance, dst);
1539 pr_debug("setting port info failed\n");
1540 goto release_unlock;
1543 /* read what has actually been set */
1544 ret = port_info_get(instance, dst);
1546 pr_debug("read back port info failed\n");
1547 goto release_unlock;
1550 /* connect two ports together */
1551 ret = port_action_handle(instance, src,
1552 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1553 dst->component->handle, dst->handle);
1555 pr_debug("connecting port %d:%d to %d:%d failed\n",
1556 src->component->handle, src->handle,
1557 dst->component->handle, dst->handle);
1558 goto release_unlock;
1560 src->connected = dst;
1564 mutex_unlock(&instance->vchiq_mutex);
1569 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1570 struct vchiq_mmal_port *port,
1571 struct mmal_buffer *buffer)
1573 unsigned long flags = 0;
1576 ret = buffer_from_host(instance, port, buffer);
1577 if (ret == -EINVAL) {
1578 /* Port is disabled. Queue for when it is enabled. */
1579 spin_lock_irqsave(&port->slock, flags);
1580 list_add_tail(&buffer->list, &port->buffers);
1581 spin_unlock_irqrestore(&port->slock, flags);
1587 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1588 struct mmal_buffer *buf)
1590 struct mmal_msg_context *msg_context = get_msg_context(instance);
1592 if (IS_ERR(msg_context))
1593 return (PTR_ERR(msg_context));
1595 buf->msg_context = msg_context;
1599 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1601 struct mmal_msg_context *msg_context = buf->msg_context;
1604 release_msg_context(msg_context);
1605 buf->msg_context = NULL;
1610 /* Initialise a mmal component and its ports
1613 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1615 struct vchiq_mmal_component **component_out)
1618 int idx; /* port index */
1619 struct vchiq_mmal_component *component;
1621 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1624 if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
1625 ret = -EINVAL; /* todo is this correct error? */
1629 component = &instance->component[instance->component_idx];
1631 ret = create_component(instance, component, name);
1633 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1638 /* ports info needs gathering */
1639 component->control.type = MMAL_PORT_TYPE_CONTROL;
1640 component->control.index = 0;
1641 component->control.component = component;
1642 spin_lock_init(&component->control.slock);
1643 INIT_LIST_HEAD(&component->control.buffers);
1644 ret = port_info_get(instance, &component->control);
1646 goto release_component;
1648 for (idx = 0; idx < component->inputs; idx++) {
1649 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1650 component->input[idx].index = idx;
1651 component->input[idx].component = component;
1652 spin_lock_init(&component->input[idx].slock);
1653 INIT_LIST_HEAD(&component->input[idx].buffers);
1654 ret = port_info_get(instance, &component->input[idx]);
1656 goto release_component;
1659 for (idx = 0; idx < component->outputs; idx++) {
1660 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1661 component->output[idx].index = idx;
1662 component->output[idx].component = component;
1663 spin_lock_init(&component->output[idx].slock);
1664 INIT_LIST_HEAD(&component->output[idx].buffers);
1665 ret = port_info_get(instance, &component->output[idx]);
1667 goto release_component;
1670 for (idx = 0; idx < component->clocks; idx++) {
1671 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1672 component->clock[idx].index = idx;
1673 component->clock[idx].component = component;
1674 spin_lock_init(&component->clock[idx].slock);
1675 INIT_LIST_HEAD(&component->clock[idx].buffers);
1676 ret = port_info_get(instance, &component->clock[idx]);
1678 goto release_component;
1681 instance->component_idx++;
1683 *component_out = component;
1685 mutex_unlock(&instance->vchiq_mutex);
1690 destroy_component(instance, component);
1692 mutex_unlock(&instance->vchiq_mutex);
1698 * cause a mmal component to be destroyed
1700 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1701 struct vchiq_mmal_component *component)
1705 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1708 if (component->enabled)
1709 ret = disable_component(instance, component);
1711 ret = destroy_component(instance, component);
1713 mutex_unlock(&instance->vchiq_mutex);
1719 * cause a mmal component to be enabled
1721 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1722 struct vchiq_mmal_component *component)
1726 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1729 if (component->enabled) {
1730 mutex_unlock(&instance->vchiq_mutex);
1734 ret = enable_component(instance, component);
1736 component->enabled = true;
1738 mutex_unlock(&instance->vchiq_mutex);
1744 * cause a mmal component to be enabled
1746 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1747 struct vchiq_mmal_component *component)
1751 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1754 if (!component->enabled) {
1755 mutex_unlock(&instance->vchiq_mutex);
1759 ret = disable_component(instance, component);
1761 component->enabled = 0;
1763 mutex_unlock(&instance->vchiq_mutex);
1768 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1769 u32 *major_out, u32 *minor_out)
1773 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1776 ret = get_version(instance, major_out, minor_out);
1778 mutex_unlock(&instance->vchiq_mutex);
1783 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1790 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1793 vchi_service_use(instance->handle);
1795 status = vchi_service_close(instance->handle);
1797 pr_err("mmal-vchiq: VCHIQ close failed\n");
1799 mutex_unlock(&instance->vchiq_mutex);
1801 flush_workqueue(instance->bulk_wq);
1802 destroy_workqueue(instance->bulk_wq);
1804 vfree(instance->bulk_scratch);
1806 idr_destroy(&instance->context_map);
1813 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1816 struct vchiq_mmal_instance *instance;
1817 static VCHI_INSTANCE_T vchi_instance;
1818 struct service_creation params = {
1819 .version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1820 .service_id = VC_MMAL_SERVER_NAME,
1821 .callback = service_callback,
1822 .callback_param = NULL,
1825 /* compile time checks to ensure structure size as they are
1826 * directly (de)serialised from memory.
1829 /* ensure the header structure has packed to the correct size */
1830 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1832 /* ensure message structure does not exceed maximum length */
1833 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1835 /* mmal port struct is correct size */
1836 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1838 /* create a vchi instance */
1839 status = vchi_initialise(&vchi_instance);
1841 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1846 status = vchi_connect(vchi_instance);
1848 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1852 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1857 mutex_init(&instance->vchiq_mutex);
1859 instance->bulk_scratch = vmalloc(PAGE_SIZE);
1861 mutex_init(&instance->context_map_lock);
1862 idr_init_base(&instance->context_map, 1);
1864 params.callback_param = instance;
1866 instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1868 if (!instance->bulk_wq)
1871 status = vchi_service_open(vchi_instance, ¶ms, &instance->handle);
1873 pr_err("Failed to open VCHI service connection (status=%d)\n",
1875 goto err_close_services;
1878 vchi_service_release(instance->handle);
1880 *out_instance = instance;
1885 vchi_service_close(instance->handle);
1886 destroy_workqueue(instance->bulk_wq);
1888 vfree(instance->bulk_scratch);