1 From 878c0bfd0c5f2dc0ef04874b1cba915cf208ca8f Mon Sep 17 00:00:00 2001
2 From: Dave Stevenson <dave.stevenson@raspberrypi.org>
3 Date: Tue, 25 Sep 2018 10:27:11 +0100
4 Subject: [PATCH] staging: vc04_services: Add new vc-sm-cma driver
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 This new driver allows contiguous memory blocks to be imported
10 into the VideoCore VPU memory map, and manages the lifetime of
11 those objects, only releasing the source dmabuf once the VPU has
12 confirmed it has finished with it.
14 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
16 staging: vc-sm-cma: Correct DMA configuration.
18 Now that VCHIQ is setting up the DMA configuration as our
19 parent device, don't try to configure it during probe.
21 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
23 staging: vc-sm-cma: Use a void* pointer as the handle within the kernel
25 The driver was using an unsigned int as the handle to the outside world,
26 and doing a nasty cast to the struct dmabuf when handed it back.
27 This breaks badly with a 64-bit kernel where the pointer doesn't fit
30 Switch to using a void* within the kernel. Reality is that it is
31 a struct dma_buf*, but advertising it as such to other drivers seems
32 to encourage the use of it as such, and I'm not sure on the implications
35 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
37 staging: vc-sm-cma: Fix up for 64bit builds
39 There were a number of logging lines that were using
40 inappropriate formatting under 64bit kernels.
42 The kernel_id field passed to/from the VPU was being
43 abused for storing the struct vc_sm_buffer *.
44 This breaks with 64bit kernels, so change to using an IDR.
46 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
48 staging: vc_sm_cma: Remove erroneous misc_deregister
50 Code from the misc /dev node was still present in
51 bcm2835_vc_sm_cma_remove, which caused a NULL deref.
56 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
58 staging: vc-sm-cma: Remove the debugfs directory on remove
60 Without removing that, reloading the driver fails.
62 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
64 staging: vc-sm-cma: Use devm_ allocs for sm_state.
66 Use managed allocations for sm_state, removing reliance on
69 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
71 staging: vc-sm-cma: Don't fail if debugfs calls fail.
73 Return codes from debugfs calls should never alter the
74 flow of the main code.
76 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
78 staging: vc-sm-cma: Ensure mutex and idr are destroyed
80 map_lock and kernelid_map are created in probe, but not released
81 in release should the vcsm service not connect (e.g. running the
84 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
86 staging: vc-sm-cma: Remove obsolete comment and make function static
88 Removes obsolete comment about wanting to pass a function
89 pointer into mmal-vchiq as we now do.
90 As the function is passed as a function pointer, the function itself
93 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
95 staging: vc-sm-cma: Add in allocation for VPU requests.
97 Module has to change from tristate to bool as all CMA functions
100 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
102 staging: vc-sm-cma: Update TODO.
104 The driver is already a platform driver, so that can be
105 deleted from the TODO.
106 There are no known issues that need to be resolved.
108 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
110 staging: vc-sm-cma: Add in userspace allocation API
112 Replacing the functionality from the older vc-sm driver,
113 add in a userspace API that allows allocation of buffers,
114 and importing of dma-bufs.
115 The driver hands out dma-buf fds, therefore much of the
116 handling around lifespan and odd mmaps from the old driver
119 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
121 staging: vcsm-cma: Add cache control ioctls
123 The old driver allowed for direct cache manipulation and that
124 was used by various clients. Replicate here.
126 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
128 staging: vcsm-cma: Alter dev node permissions to 0666
130 Until the udev rules are updated, open up access to this node by
133 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
135 staging: vcsm-cma: Drop logging level on messages in vc_sm_release_resource
137 They weren't errors but were logged as such.
139 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
141 staging: vcsm-cma: Fixup the alloc code handling of kernel_id
143 The allocation code had been copied in from an old branch prior
144 to having added the IDR for 64bit support. It was therefore pushing
145 a pointer into the kernel_id field instead of an IDR handle, the
146 lookup therefore failed, and we never released the buffer.
148 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
150 staging: vcsm-cma: Remove cache manipulation ioctl from ARM64
152 The cache flushing ioctls are used by the Pi3 HEVC hw-assisted
153 decoder as it needs finer grained flushing control than dma_ops
155 These cache calls are not present for ARM64, therefore disable
156 them. We are not actively supporting 64bit kernels at present,
157 and the use case of the HEVC decoder is fairly limited.
159 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
161 staging: vcsm-cma: Rework to use dma APIs, not CMA
163 Due to a misunderstanding of the DMA mapping APIs, I made
164 the wrong decision on how to implement this.
166 Rework to use dma_alloc_coherent instead of the CMA
167 API. This also allows it to be built as a module easily.
169 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
171 staging: vc-sm-cma: Fix the few remaining coding style issues
173 Fix a few minor checkpatch complaints to make the driver clean
175 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
177 staging: vc04_services: fix compiling in separate directory
179 The vc04_services Makefiles do not respect the O=path argument
180 correctly: include paths in CFLAGS are given relatively to object path,
181 not source path. Compiling in a separate directory yields #include
184 Signed-off-by: Marek Behún <marek.behun@nic.cz>
186 vc-sm-cma: Fix compatibility ioctl
188 This code path hasn't been used previously.
189 Fixed up after testing with kodi on 32-bit userland and 64-bit kernel
191 Signed-off-by: popcornmix <popcornmix@gmail.com>
193 drivers/staging/vc04_services/Kconfig | 1 +
194 drivers/staging/vc04_services/Makefile | 1 +
195 .../vc04_services/bcm2835-camera/Makefile | 4 +-
196 .../staging/vc04_services/vc-sm-cma/Kconfig | 10 +
197 .../staging/vc04_services/vc-sm-cma/Makefile | 8 +
198 drivers/staging/vc04_services/vc-sm-cma/TODO | 1 +
199 .../staging/vc04_services/vc-sm-cma/vc_sm.c | 1774 +++++++++++++++++
200 .../staging/vc04_services/vc-sm-cma/vc_sm.h | 84 +
201 .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.c | 505 +++++
202 .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.h | 63 +
203 .../vc04_services/vc-sm-cma/vc_sm_defs.h | 300 +++
204 .../vc04_services/vc-sm-cma/vc_sm_knl.h | 28 +
205 .../staging/vc04_services/vchiq-mmal/Makefile | 2 +-
206 include/linux/broadcom/vc_sm_cma_ioctl.h | 114 ++
207 14 files changed, 2892 insertions(+), 3 deletions(-)
208 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/Kconfig
209 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/Makefile
210 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/TODO
211 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
212 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
213 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
214 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
215 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
216 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_knl.h
217 create mode 100644 include/linux/broadcom/vc_sm_cma_ioctl.h
219 --- a/drivers/staging/vc04_services/Kconfig
220 +++ b/drivers/staging/vc04_services/Kconfig
221 @@ -23,6 +23,7 @@ source "drivers/staging/vc04_services/bc
223 source "drivers/staging/vc04_services/bcm2835-camera/Kconfig"
224 source "drivers/staging/vc04_services/vchiq-mmal/Kconfig"
225 +source "drivers/staging/vc04_services/vc-sm-cma/Kconfig"
229 --- a/drivers/staging/vc04_services/Makefile
230 +++ b/drivers/staging/vc04_services/Makefile
231 @@ -13,6 +13,7 @@ vchiq-objs := \
232 obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
233 obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
234 obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += vchiq-mmal/
235 +obj-$(CONFIG_BCM_VC_SM_CMA) += vc-sm-cma/
237 ccflags-y += -Idrivers/staging/vc04_services -D__VCCOREVER__=0x04000000
239 --- a/drivers/staging/vc04_services/bcm2835-camera/Makefile
240 +++ b/drivers/staging/vc04_services/bcm2835-camera/Makefile
241 @@ -7,6 +7,6 @@ obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-v
244 -I $(srctree)/$(src)/.. \
245 - -Idrivers/staging/vc04_services \
246 - -Idrivers/staging/vc04_services/vchiq-mmal \
247 + -I$(srctree)/drivers/staging/vc04_services \
248 + -I$(srctree)/drivers/staging/vc04_services/vchiq-mmal \
249 -D__VCCOREVER__=0x04000000
251 +++ b/drivers/staging/vc04_services/vc-sm-cma/Kconfig
253 +config BCM_VC_SM_CMA
254 + tristate "VideoCore Shared Memory (CMA) driver"
255 + depends on BCM2835_VCHIQ
257 + select DMA_SHARED_BUFFER
259 + Say Y here to enable the shared memory interface that
260 + supports sharing dmabufs with VideoCore.
261 + This operates over the VCHIQ interface to a service
262 + running on VideoCore.
264 +++ b/drivers/staging/vc04_services/vc-sm-cma/Makefile
266 +ccflags-y += -I$(srctree)/drivers/staging/vc04_services -I$(srctree)/drivers/staging/vc04_services/interface/vchi -I$(srctree)/drivers/staging/vc04_services/interface/vchiq_arm
267 +# -I"drivers/staging/android/ion/" -I"$(srctree)/fs/"
268 +ccflags-y += -D__VCCOREVER__=0
270 +vc-sm-cma-$(CONFIG_BCM_VC_SM_CMA) := \
271 + vc_sm.o vc_sm_cma_vchi.o
273 +obj-$(CONFIG_BCM_VC_SM_CMA) += vc-sm-cma.o
275 +++ b/drivers/staging/vc04_services/vc-sm-cma/TODO
277 +No currently outstanding tasks except some clean-up.
279 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
281 +// SPDX-License-Identifier: GPL-2.0
283 + * VideoCore Shared Memory driver using CMA.
285 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
286 + * Dave Stevenson <dave.stevenson@raspberrypi.org>
288 + * Based on vmcs_sm driver from Broadcom Corporation for some API,
289 + * and taking some code for buffer allocation and dmabuf handling from
293 + * This driver has 3 main uses:
294 + * 1) Allocating buffers for the kernel or userspace that can be shared with the
296 + * 2) Importing dmabufs from elsewhere for sharing with the VPU.
297 + * 3) Allocating buffers for use by the VPU.
299 + * In the first and second cases the native handle is a dmabuf. Releasing the
300 + * resource inherently comes from releasing the dmabuf, and this will trigger
301 + * unmapping on the VPU. The underlying allocation and our buffer structure are
302 + * retained until the VPU has confirmed that it has finished with it.
304 + * For the VPU allocations the VPU is responsible for triggering the release,
305 + * and therefore the released message decrements the dma_buf refcount (with the
306 + * VPU mapping having already been marked as released).
309 +/* ---- Include Files ----------------------------------------------------- */
310 +#include <linux/cdev.h>
311 +#include <linux/device.h>
312 +#include <linux/debugfs.h>
313 +#include <linux/dma-mapping.h>
314 +#include <linux/dma-buf.h>
315 +#include <linux/errno.h>
316 +#include <linux/fs.h>
317 +#include <linux/kernel.h>
318 +#include <linux/list.h>
319 +#include <linux/miscdevice.h>
320 +#include <linux/module.h>
321 +#include <linux/mm.h>
322 +#include <linux/of_device.h>
323 +#include <linux/platform_device.h>
324 +#include <linux/proc_fs.h>
325 +#include <linux/slab.h>
326 +#include <linux/seq_file.h>
327 +#include <linux/syscalls.h>
328 +#include <linux/types.h>
329 +#include <asm/cacheflush.h>
331 +#include "vchiq_connected.h"
332 +#include "vc_sm_cma_vchi.h"
335 +#include "vc_sm_knl.h"
336 +#include <linux/broadcom/vc_sm_cma_ioctl.h>
338 +/* ---- Private Constants and Types --------------------------------------- */
340 +#define DEVICE_NAME "vcsm-cma"
341 +#define DEVICE_MINOR 0
343 +#define VC_SM_RESOURCE_NAME_DEFAULT "sm-host-resource"
345 +#define VC_SM_DIR_ROOT_NAME "vcsm-cma"
346 +#define VC_SM_STATE "state"
348 +/* Private file data associated with each opened device. */
349 +struct vc_sm_privdata_t {
350 + pid_t pid; /* PID of creator. */
352 + int restart_sys; /* Tracks restart on interrupt. */
353 + enum vc_sm_msg_type int_action; /* Interrupted action. */
354 + u32 int_trans_id; /* Interrupted transaction. */
357 +typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
359 + VC_SM_SHOW show; /* Debug fs function hookup. */
360 + struct dentry *dir_entry; /* Debug fs directory entry. */
361 + void *priv_data; /* Private data */
364 +/* Global state information. */
366 + struct platform_device *pdev;
368 + struct miscdevice misc_dev;
370 + struct sm_instance *sm_handle; /* Handle for videocore service. */
372 + spinlock_t kernelid_map_lock; /* Spinlock protecting kernelid_map */
373 + struct idr kernelid_map;
375 + struct mutex map_lock; /* Global map lock. */
376 + struct list_head buffer_list; /* List of buffer. */
378 + struct vc_sm_privdata_t *data_knl; /* Kernel internal data tracking. */
379 + struct vc_sm_privdata_t *vpu_allocs; /* All allocations from the VPU */
380 + struct dentry *dir_root; /* Debug fs entries root. */
381 + struct sm_pde_t dir_state; /* Debug fs entries state sub-tree. */
383 + bool require_released_callback; /* VPU will send a released msg when it
384 + * has finished with a resource.
386 + u32 int_trans_id; /* Interrupted transaction. */
389 +struct vc_sm_dma_buf_attachment {
390 + struct device *dev;
391 + struct sg_table sg_table;
392 + struct list_head list;
393 + enum dma_data_direction dma_dir;
396 +/* ---- Private Variables ----------------------------------------------- */
398 +static struct sm_state_t *sm_state;
399 +static int sm_inited;
401 +/* ---- Private Function Prototypes -------------------------------------- */
403 +/* ---- Private Functions ------------------------------------------------ */
405 +static int get_kernel_id(struct vc_sm_buffer *buffer)
409 + spin_lock(&sm_state->kernelid_map_lock);
410 + handle = idr_alloc(&sm_state->kernelid_map, buffer, 0, 0, GFP_KERNEL);
411 + spin_unlock(&sm_state->kernelid_map_lock);
416 +static struct vc_sm_buffer *lookup_kernel_id(int handle)
418 + return idr_find(&sm_state->kernelid_map, handle);
421 +static void free_kernel_id(int handle)
423 + spin_lock(&sm_state->kernelid_map_lock);
424 + idr_remove(&sm_state->kernelid_map, handle);
425 + spin_unlock(&sm_state->kernelid_map_lock);
428 +static int vc_sm_cma_seq_file_show(struct seq_file *s, void *v)
430 + struct sm_pde_t *sm_pde;
432 + sm_pde = (struct sm_pde_t *)(s->private);
434 + if (sm_pde && sm_pde->show)
435 + sm_pde->show(s, v);
440 +static int vc_sm_cma_single_open(struct inode *inode, struct file *file)
442 + return single_open(file, vc_sm_cma_seq_file_show, inode->i_private);
445 +static const struct file_operations vc_sm_cma_debug_fs_fops = {
446 + .open = vc_sm_cma_single_open,
448 + .llseek = seq_lseek,
449 + .release = single_release,
452 +static int vc_sm_cma_global_state_show(struct seq_file *s, void *v)
454 + struct vc_sm_buffer *resource = NULL;
455 + int resource_count = 0;
460 + seq_printf(s, "\nVC-ServiceHandle %p\n", sm_state->sm_handle);
462 + /* Log all applicable mapping(s). */
464 + mutex_lock(&sm_state->map_lock);
465 + seq_puts(s, "\nResources\n");
466 + if (!list_empty(&sm_state->buffer_list)) {
467 + list_for_each_entry(resource, &sm_state->buffer_list,
468 + global_buffer_list) {
471 + seq_printf(s, "\nResource %p\n",
473 + seq_printf(s, " NAME %s\n",
475 + seq_printf(s, " SIZE %zu\n",
477 + seq_printf(s, " DMABUF %p\n",
478 + resource->dma_buf);
479 + if (resource->imported) {
480 + seq_printf(s, " ATTACH %p\n",
481 + resource->import.attach);
482 + seq_printf(s, " SGT %p\n",
483 + resource->import.sgt);
485 + seq_printf(s, " SGT %p\n",
486 + resource->alloc.sg_table);
488 + seq_printf(s, " DMA_ADDR %pad\n",
489 + &resource->dma_addr);
490 + seq_printf(s, " VC_HANDLE %08x\n",
491 + resource->vc_handle);
492 + seq_printf(s, " VC_MAPPING %d\n",
493 + resource->vpu_state);
496 + seq_printf(s, "\n\nTotal resource count: %d\n\n", resource_count);
498 + mutex_unlock(&sm_state->map_lock);
504 + * Adds a buffer to the private data list which tracks all the allocated
507 +static void vc_sm_add_resource(struct vc_sm_privdata_t *privdata,
508 + struct vc_sm_buffer *buffer)
510 + mutex_lock(&sm_state->map_lock);
511 + list_add(&buffer->global_buffer_list, &sm_state->buffer_list);
512 + mutex_unlock(&sm_state->map_lock);
514 + pr_debug("[%s]: added buffer %p (name %s, size %zu)\n",
515 + __func__, buffer, buffer->name, buffer->size);
519 + * Cleans up imported dmabuf.
521 +static void vc_sm_clean_up_dmabuf(struct vc_sm_buffer *buffer)
523 + if (!buffer->imported)
526 + /* Handle cleaning up imported dmabufs */
527 + mutex_lock(&buffer->lock);
528 + if (buffer->import.sgt) {
529 + dma_buf_unmap_attachment(buffer->import.attach,
530 + buffer->import.sgt,
531 + DMA_BIDIRECTIONAL);
532 + buffer->import.sgt = NULL;
534 + if (buffer->import.attach) {
535 + dma_buf_detach(buffer->dma_buf, buffer->import.attach);
536 + buffer->import.attach = NULL;
538 + mutex_unlock(&buffer->lock);
542 + * Instructs VPU to decrement the refcount on a buffer.
544 +static void vc_sm_vpu_free(struct vc_sm_buffer *buffer)
546 + if (buffer->vc_handle && buffer->vpu_state == VPU_MAPPED) {
547 + struct vc_sm_free_t free = { buffer->vc_handle, 0 };
548 + int status = vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
549 + &sm_state->int_trans_id);
550 + if (status != 0 && status != -EINTR) {
551 + pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
552 + __func__, status, sm_state->int_trans_id);
555 + if (sm_state->require_released_callback) {
556 + /* Need to wait for the VPU to confirm the free. */
558 + /* Retain a reference on this until the VPU has
561 + buffer->vpu_state = VPU_UNMAPPING;
563 + buffer->vpu_state = VPU_NOT_MAPPED;
564 + buffer->vc_handle = 0;
570 + * Release an allocation.
571 + * All refcounting is done via the dma buf object.
573 + * Must be called with the mutex held. The function will either release the
574 + * mutex (if defering the release) or destroy it. The caller must therefore not
575 + * reuse the buffer on return.
577 +static void vc_sm_release_resource(struct vc_sm_buffer *buffer)
579 + pr_debug("[%s]: buffer %p (name %s, size %zu), imported %u\n",
580 + __func__, buffer, buffer->name, buffer->size,
583 + if (buffer->vc_handle) {
584 + /* We've sent the unmap request but not had the response. */
585 + pr_debug("[%s]: Waiting for VPU unmap response on %p\n",
589 + if (buffer->in_use) {
590 + /* dmabuf still in use - we await the release */
591 + pr_debug("[%s]: buffer %p is still in use\n", __func__, buffer);
595 + /* Release the allocation (whether imported dmabuf or CMA allocation) */
596 + if (buffer->imported) {
597 + if (buffer->import.dma_buf)
598 + dma_buf_put(buffer->import.dma_buf);
600 + pr_err("%s: Imported dmabuf already been put for buf %p\n",
602 + buffer->import.dma_buf = NULL;
604 + dma_free_coherent(&sm_state->pdev->dev, buffer->size,
605 + buffer->cookie, buffer->dma_addr);
608 + /* Free our buffer. Start by removing it from the list */
609 + mutex_lock(&sm_state->map_lock);
610 + list_del(&buffer->global_buffer_list);
611 + mutex_unlock(&sm_state->map_lock);
613 + pr_debug("%s: Release our allocation - done\n", __func__);
614 + mutex_unlock(&buffer->lock);
616 + mutex_destroy(&buffer->lock);
622 + mutex_unlock(&buffer->lock);
625 +/* Create support for private data tracking. */
626 +static struct vc_sm_privdata_t *vc_sm_cma_create_priv_data(pid_t id)
628 + char alloc_name[32];
629 + struct vc_sm_privdata_t *file_data = NULL;
631 + /* Allocate private structure. */
632 + file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
637 + snprintf(alloc_name, sizeof(alloc_name), "%d", id);
639 + file_data->pid = id;
644 +/* Dma buf operations for use with our own allocations */
646 +static int vc_sm_dma_buf_attach(struct dma_buf *dmabuf,
647 + struct dma_buf_attachment *attachment)
650 + struct vc_sm_dma_buf_attachment *a;
651 + struct sg_table *sgt;
652 + struct vc_sm_buffer *buf = dmabuf->priv;
653 + struct scatterlist *rd, *wr;
656 + a = kzalloc(sizeof(*a), GFP_KERNEL);
660 + pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
662 + mutex_lock(&buf->lock);
664 + INIT_LIST_HEAD(&a->list);
666 + sgt = &a->sg_table;
668 + /* Copy the buf->base_sgt scatter list to the attachment, as we can't
669 + * map the same scatter list to multiple attachments at the same time.
671 + ret = sg_alloc_table(sgt, buf->alloc.sg_table->orig_nents, GFP_KERNEL);
677 + rd = buf->alloc.sg_table->sgl;
679 + for (i = 0; i < sgt->orig_nents; ++i) {
680 + sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
685 + a->dma_dir = DMA_NONE;
686 + attachment->priv = a;
688 + list_add(&a->list, &buf->attachments);
689 + mutex_unlock(&buf->lock);
694 +static void vc_sm_dma_buf_detach(struct dma_buf *dmabuf,
695 + struct dma_buf_attachment *attachment)
697 + struct vc_sm_dma_buf_attachment *a = attachment->priv;
698 + struct vc_sm_buffer *buf = dmabuf->priv;
699 + struct sg_table *sgt;
701 + pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
705 + sgt = &a->sg_table;
707 + /* release the scatterlist cache */
708 + if (a->dma_dir != DMA_NONE)
709 + dma_unmap_sg(attachment->dev, sgt->sgl, sgt->orig_nents,
711 + sg_free_table(sgt);
713 + mutex_lock(&buf->lock);
714 + list_del(&a->list);
715 + mutex_unlock(&buf->lock);
720 +static struct sg_table *vc_sm_map_dma_buf(struct dma_buf_attachment *attachment,
721 + enum dma_data_direction direction)
723 + struct vc_sm_dma_buf_attachment *a = attachment->priv;
724 + /* stealing dmabuf mutex to serialize map/unmap operations */
725 + struct mutex *lock = &attachment->dmabuf->lock;
726 + struct sg_table *table;
729 + pr_debug("%s attachment %p\n", __func__, attachment);
730 + table = &a->sg_table;
732 + /* return previously mapped sg table */
733 + if (a->dma_dir == direction) {
734 + mutex_unlock(lock);
738 + /* release any previous cache */
739 + if (a->dma_dir != DMA_NONE) {
740 + dma_unmap_sg(attachment->dev, table->sgl, table->orig_nents,
742 + a->dma_dir = DMA_NONE;
745 + /* mapping to the client with new direction */
746 + table->nents = dma_map_sg(attachment->dev, table->sgl,
747 + table->orig_nents, direction);
748 + if (!table->nents) {
749 + pr_err("failed to map scatterlist\n");
750 + mutex_unlock(lock);
751 + return ERR_PTR(-EIO);
754 + a->dma_dir = direction;
755 + mutex_unlock(lock);
757 + pr_debug("%s attachment %p\n", __func__, attachment);
761 +static void vc_sm_unmap_dma_buf(struct dma_buf_attachment *attachment,
762 + struct sg_table *table,
763 + enum dma_data_direction direction)
765 + pr_debug("%s attachment %p\n", __func__, attachment);
766 + dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
769 +static int vc_sm_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
771 + struct vc_sm_buffer *buf = dmabuf->priv;
774 + pr_debug("%s dmabuf %p, buf %p, vm_start %08lX\n", __func__, dmabuf,
775 + buf, vma->vm_start);
777 + mutex_lock(&buf->lock);
779 + /* now map it to userspace */
782 + ret = dma_mmap_coherent(&sm_state->pdev->dev, vma, buf->cookie,
783 + buf->dma_addr, buf->size);
786 + pr_err("Remapping memory failed, error: %d\n", ret);
790 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
792 + mutex_unlock(&buf->lock);
795 + pr_err("%s: failure mapping buffer to userspace\n",
801 +static void vc_sm_dma_buf_release(struct dma_buf *dmabuf)
803 + struct vc_sm_buffer *buffer;
808 + buffer = (struct vc_sm_buffer *)dmabuf->priv;
810 + mutex_lock(&buffer->lock);
812 + pr_debug("%s dmabuf %p, buffer %p\n", __func__, dmabuf, buffer);
814 + buffer->in_use = 0;
816 + /* Unmap on the VPU */
817 + vc_sm_vpu_free(buffer);
818 + pr_debug("%s vpu_free done\n", __func__);
820 + /* Unmap our dma_buf object (the vc_sm_buffer remains until released
823 + vc_sm_clean_up_dmabuf(buffer);
824 + pr_debug("%s clean_up dmabuf done\n", __func__);
826 + /* buffer->lock will be destroyed by vc_sm_release_resource if finished
827 + * with, otherwise unlocked. Do NOT unlock here.
829 + vc_sm_release_resource(buffer);
830 + pr_debug("%s done\n", __func__);
833 +static int vc_sm_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
834 + enum dma_data_direction direction)
836 + struct vc_sm_buffer *buf;
837 + struct vc_sm_dma_buf_attachment *a;
842 + buf = dmabuf->priv;
846 + mutex_lock(&buf->lock);
848 + list_for_each_entry(a, &buf->attachments, list) {
849 + dma_sync_sg_for_cpu(a->dev, a->sg_table.sgl,
850 + a->sg_table.nents, direction);
852 + mutex_unlock(&buf->lock);
857 +static int vc_sm_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
858 + enum dma_data_direction direction)
860 + struct vc_sm_buffer *buf;
861 + struct vc_sm_dma_buf_attachment *a;
865 + buf = dmabuf->priv;
869 + mutex_lock(&buf->lock);
871 + list_for_each_entry(a, &buf->attachments, list) {
872 + dma_sync_sg_for_device(a->dev, a->sg_table.sgl,
873 + a->sg_table.nents, direction);
875 + mutex_unlock(&buf->lock);
880 +static void *vc_sm_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
886 +static void vc_sm_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
892 +static const struct dma_buf_ops dma_buf_ops = {
893 + .map_dma_buf = vc_sm_map_dma_buf,
894 + .unmap_dma_buf = vc_sm_unmap_dma_buf,
895 + .mmap = vc_sm_dmabuf_mmap,
896 + .release = vc_sm_dma_buf_release,
897 + .attach = vc_sm_dma_buf_attach,
898 + .detach = vc_sm_dma_buf_detach,
899 + .begin_cpu_access = vc_sm_dma_buf_begin_cpu_access,
900 + .end_cpu_access = vc_sm_dma_buf_end_cpu_access,
901 + .map = vc_sm_dma_buf_kmap,
902 + .unmap = vc_sm_dma_buf_kunmap,
905 +/* Dma_buf operations for chaining through to an imported dma_buf */
908 +int vc_sm_import_dma_buf_attach(struct dma_buf *dmabuf,
909 + struct dma_buf_attachment *attachment)
911 + struct vc_sm_buffer *buf = dmabuf->priv;
913 + if (!buf->imported)
915 + return buf->import.dma_buf->ops->attach(buf->import.dma_buf,
920 +void vc_sm_import_dma_buf_detatch(struct dma_buf *dmabuf,
921 + struct dma_buf_attachment *attachment)
923 + struct vc_sm_buffer *buf = dmabuf->priv;
925 + if (!buf->imported)
927 + buf->import.dma_buf->ops->detach(buf->import.dma_buf, attachment);
931 +struct sg_table *vc_sm_import_map_dma_buf(struct dma_buf_attachment *attachment,
932 + enum dma_data_direction direction)
934 + struct vc_sm_buffer *buf = attachment->dmabuf->priv;
936 + if (!buf->imported)
938 + return buf->import.dma_buf->ops->map_dma_buf(attachment,
943 +void vc_sm_import_unmap_dma_buf(struct dma_buf_attachment *attachment,
944 + struct sg_table *table,
945 + enum dma_data_direction direction)
947 + struct vc_sm_buffer *buf = attachment->dmabuf->priv;
949 + if (!buf->imported)
951 + buf->import.dma_buf->ops->unmap_dma_buf(attachment, table, direction);
955 +int vc_sm_import_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
957 + struct vc_sm_buffer *buf = dmabuf->priv;
959 + pr_debug("%s: mmap dma_buf %p, buf %p, imported db %p\n", __func__,
960 + dmabuf, buf, buf->import.dma_buf);
961 + if (!buf->imported) {
962 + pr_err("%s: mmap dma_buf %p- not an imported buffer\n",
966 + return buf->import.dma_buf->ops->mmap(buf->import.dma_buf, vma);
970 +void vc_sm_import_dma_buf_release(struct dma_buf *dmabuf)
972 + struct vc_sm_buffer *buf = dmabuf->priv;
974 + pr_debug("%s: Relasing dma_buf %p\n", __func__, dmabuf);
975 + mutex_lock(&buf->lock);
976 + if (!buf->imported)
981 + vc_sm_vpu_free(buf);
983 + vc_sm_release_resource(buf);
987 +void *vc_sm_import_dma_buf_kmap(struct dma_buf *dmabuf,
988 + unsigned long offset)
990 + struct vc_sm_buffer *buf = dmabuf->priv;
992 + if (!buf->imported)
994 + return buf->import.dma_buf->ops->map(buf->import.dma_buf, offset);
998 +void vc_sm_import_dma_buf_kunmap(struct dma_buf *dmabuf,
999 + unsigned long offset, void *ptr)
1001 + struct vc_sm_buffer *buf = dmabuf->priv;
1003 + if (!buf->imported)
1005 + buf->import.dma_buf->ops->unmap(buf->import.dma_buf, offset, ptr);
1009 +int vc_sm_import_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1010 + enum dma_data_direction direction)
1012 + struct vc_sm_buffer *buf = dmabuf->priv;
1014 + if (!buf->imported)
1016 + return buf->import.dma_buf->ops->begin_cpu_access(buf->import.dma_buf,
1021 +int vc_sm_import_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1022 + enum dma_data_direction direction)
1024 + struct vc_sm_buffer *buf = dmabuf->priv;
1026 + if (!buf->imported)
1028 + return buf->import.dma_buf->ops->end_cpu_access(buf->import.dma_buf,
1032 +static const struct dma_buf_ops dma_buf_import_ops = {
1033 + .map_dma_buf = vc_sm_import_map_dma_buf,
1034 + .unmap_dma_buf = vc_sm_import_unmap_dma_buf,
1035 + .mmap = vc_sm_import_dmabuf_mmap,
1036 + .release = vc_sm_import_dma_buf_release,
1037 + .attach = vc_sm_import_dma_buf_attach,
1038 + .detach = vc_sm_import_dma_buf_detatch,
1039 + .begin_cpu_access = vc_sm_import_dma_buf_begin_cpu_access,
1040 + .end_cpu_access = vc_sm_import_dma_buf_end_cpu_access,
1041 + .map = vc_sm_import_dma_buf_kmap,
1042 + .unmap = vc_sm_import_dma_buf_kunmap,
1045 +/* Import a dma_buf to be shared with VC. */
1047 +vc_sm_cma_import_dmabuf_internal(struct vc_sm_privdata_t *private,
1048 + struct dma_buf *dma_buf,
1050 + struct dma_buf **imported_buf)
1052 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1053 + struct vc_sm_buffer *buffer = NULL;
1054 + struct vc_sm_import import = { };
1055 + struct vc_sm_import_result result = { };
1056 + struct dma_buf_attachment *attach = NULL;
1057 + struct sg_table *sgt = NULL;
1058 + dma_addr_t dma_addr;
1062 + /* Setup our allocation parameters */
1063 + pr_debug("%s: importing dma_buf %p/fd %d\n", __func__, dma_buf, fd);
1066 + get_dma_buf(dma_buf);
1068 + dma_buf = dma_buf_get(fd);
1073 + attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
1074 + if (IS_ERR(attach)) {
1075 + ret = PTR_ERR(attach);
1079 + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1080 + if (IS_ERR(sgt)) {
1081 + ret = PTR_ERR(sgt);
1085 + /* Verify that the address block is contiguous */
1086 + if (sgt->nents != 1) {
1091 + /* Allocate local buffer to track this allocation. */
1092 + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1098 + import.type = VC_SM_ALLOC_NON_CACHED;
1099 + dma_addr = sg_dma_address(sgt->sgl);
1100 + import.addr = (u32)dma_addr;
1101 + if ((import.addr & 0xC0000000) != 0xC0000000) {
1102 + pr_err("%s: Expecting an uncached alias for dma_addr %pad\n",
1103 + __func__, &dma_addr);
1104 + import.addr |= 0xC0000000;
1106 + import.size = sg_dma_len(sgt->sgl);
1107 + import.allocator = current->tgid;
1108 + import.kernel_id = get_kernel_id(buffer);
1110 + memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
1111 + sizeof(VC_SM_RESOURCE_NAME_DEFAULT));
1113 + pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %pad, size %u.\n",
1114 + __func__, import.name, import.type, &dma_addr, import.size);
1116 + /* Allocate the videocore buffer. */
1117 + status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
1118 + &sm_state->int_trans_id);
1119 + if (status == -EINTR) {
1120 + pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
1121 + __func__, sm_state->int_trans_id);
1122 + ret = -ERESTARTSYS;
1123 + private->restart_sys = -EINTR;
1124 + private->int_action = VC_SM_MSG_TYPE_IMPORT;
1126 + } else if (status || !result.res_handle) {
1127 + pr_debug("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
1128 + __func__, status, sm_state->int_trans_id);
1133 + mutex_init(&buffer->lock);
1134 + INIT_LIST_HEAD(&buffer->attachments);
1135 + memcpy(buffer->name, import.name,
1136 + min(sizeof(buffer->name), sizeof(import.name) - 1));
1138 + /* Keep track of the buffer we created. */
1139 + buffer->private = private;
1140 + buffer->vc_handle = result.res_handle;
1141 + buffer->size = import.size;
1142 + buffer->vpu_state = VPU_MAPPED;
1144 + buffer->imported = 1;
1145 + buffer->import.dma_buf = dma_buf;
1147 + buffer->import.attach = attach;
1148 + buffer->import.sgt = sgt;
1149 + buffer->dma_addr = dma_addr;
1150 + buffer->in_use = 1;
1151 + buffer->kernel_id = import.kernel_id;
1154 + * We're done - we need to export a new dmabuf chaining through most
1155 + * functions, but enabling us to release our own internal references
1158 + exp_info.ops = &dma_buf_import_ops;
1159 + exp_info.size = import.size;
1160 + exp_info.flags = O_RDWR;
1161 + exp_info.priv = buffer;
1163 + buffer->dma_buf = dma_buf_export(&exp_info);
1164 + if (IS_ERR(buffer->dma_buf)) {
1165 + ret = PTR_ERR(buffer->dma_buf);
1169 + vc_sm_add_resource(private, buffer);
1171 + *imported_buf = buffer->dma_buf;
1176 + if (result.res_handle) {
1177 + struct vc_sm_free_t free = { result.res_handle, 0 };
1179 + vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
1180 + &sm_state->int_trans_id);
1182 + free_kernel_id(import.kernel_id);
1185 + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
1187 + dma_buf_detach(dma_buf, attach);
1188 + dma_buf_put(dma_buf);
1192 +static int vc_sm_cma_vpu_alloc(u32 size, u32 align, const char *name,
1193 + u32 mem_handle, struct vc_sm_buffer **ret_buffer)
1195 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1196 + struct vc_sm_buffer *buffer = NULL;
1197 + struct sg_table *sgt;
1201 + /* Align to the user requested align */
1202 + aligned_size = ALIGN(size, align);
1203 + /* and then to a page boundary */
1204 + aligned_size = PAGE_ALIGN(aligned_size);
1206 + if (!aligned_size)
1209 + /* Allocate local buffer to track this allocation. */
1210 + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1214 + mutex_init(&buffer->lock);
1215 + /* Acquire the mutex as vc_sm_release_resource will release it in the
1218 + mutex_lock(&buffer->lock);
1220 + buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
1221 + aligned_size, &buffer->dma_addr,
1223 + if (!buffer->cookie) {
1224 + pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
1225 + __func__, aligned_size);
1230 + pr_debug("[%s]: alloc of %d bytes success\n",
1231 + __func__, aligned_size);
1233 + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
1239 + ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
1240 + buffer->dma_addr, buffer->size);
1242 + pr_err("failed to get scatterlist from DMA API\n");
1247 + buffer->alloc.sg_table = sgt;
1249 + INIT_LIST_HEAD(&buffer->attachments);
1251 + memcpy(buffer->name, name,
1252 + min(sizeof(buffer->name), strlen(name)));
1254 + exp_info.ops = &dma_buf_ops;
1255 + exp_info.size = aligned_size;
1256 + exp_info.flags = O_RDWR;
1257 + exp_info.priv = buffer;
1259 + buffer->dma_buf = dma_buf_export(&exp_info);
1260 + if (IS_ERR(buffer->dma_buf)) {
1261 + ret = PTR_ERR(buffer->dma_buf);
1264 + buffer->dma_addr = (u32)sg_dma_address(buffer->alloc.sg_table->sgl);
1265 + if ((buffer->dma_addr & 0xC0000000) != 0xC0000000) {
1266 + pr_warn_once("%s: Expecting an uncached alias for dma_addr %pad\n",
1267 + __func__, &buffer->dma_addr);
1268 + buffer->dma_addr |= 0xC0000000;
1270 + buffer->private = sm_state->vpu_allocs;
1272 + buffer->vc_handle = mem_handle;
1273 + buffer->vpu_state = VPU_MAPPED;
1274 + buffer->vpu_allocated = 1;
1275 + buffer->size = size;
1277 + * Create an ID that will be passed along with our message so
1278 + * that when we service the release reply, we can look up which
1279 + * resource is being released.
1281 + buffer->kernel_id = get_kernel_id(buffer);
1283 + vc_sm_add_resource(sm_state->vpu_allocs, buffer);
1285 + mutex_unlock(&buffer->lock);
1287 + *ret_buffer = buffer;
1291 + vc_sm_release_resource(buffer);
1296 +vc_sm_vpu_event(struct sm_instance *instance, struct vc_sm_result_t *reply,
1299 + switch (reply->trans_id & ~0x80000000) {
1300 + case VC_SM_MSG_TYPE_CLIENT_VERSION:
1302 + /* Acknowledge that the firmware supports the version command */
1303 + pr_debug("%s: firmware acked version msg. Require release cb\n",
1305 + sm_state->require_released_callback = true;
1308 + case VC_SM_MSG_TYPE_RELEASED:
1310 + struct vc_sm_released *release = (struct vc_sm_released *)reply;
1311 + struct vc_sm_buffer *buffer =
1312 + lookup_kernel_id(release->kernel_id);
1314 + pr_err("%s: VC released a buffer that is already released, kernel_id %d\n",
1315 + __func__, release->kernel_id);
1318 + mutex_lock(&buffer->lock);
1320 + pr_debug("%s: Released addr %08x, size %u, id %08x, mem_handle %08x\n",
1321 + __func__, release->addr, release->size,
1322 + release->kernel_id, release->vc_handle);
1324 + buffer->vc_handle = 0;
1325 + buffer->vpu_state = VPU_NOT_MAPPED;
1326 + free_kernel_id(release->kernel_id);
1328 + if (buffer->vpu_allocated) {
1329 + /* VPU allocation, so release the dmabuf which will
1330 + * trigger the clean up.
1332 + mutex_unlock(&buffer->lock);
1333 + dma_buf_put(buffer->dma_buf);
1335 + vc_sm_release_resource(buffer);
1339 + case VC_SM_MSG_TYPE_VC_MEM_REQUEST:
1341 + struct vc_sm_buffer *buffer = NULL;
1342 + struct vc_sm_vc_mem_request *req =
1343 + (struct vc_sm_vc_mem_request *)reply;
1344 + struct vc_sm_vc_mem_request_result reply;
1347 + pr_debug("%s: Request %u bytes of memory, align %d name %s, trans_id %08x\n",
1348 + __func__, req->size, req->align, req->name,
1350 + ret = vc_sm_cma_vpu_alloc(req->size, req->align, req->name,
1351 + req->vc_handle, &buffer);
1353 + reply.trans_id = req->trans_id;
1355 + reply.addr = buffer->dma_addr;
1356 + reply.kernel_id = buffer->kernel_id;
1357 + pr_debug("%s: Allocated resource buffer %p, addr %pad\n",
1358 + __func__, buffer, &buffer->dma_addr);
1360 + pr_err("%s: Allocation failed size %u, name %s, vc_handle %u\n",
1361 + __func__, req->size, req->name, req->vc_handle);
1363 + reply.kernel_id = 0;
1365 + vc_sm_vchi_client_vc_mem_req_reply(sm_state->sm_handle, &reply,
1366 + &sm_state->int_trans_id);
1371 + pr_err("%s: Unknown vpu cmd %x\n", __func__, reply->trans_id);
1376 +/* Userspace handling */
1378 + * Open the device. Creates a private state to help track all allocation
1379 + * associated with this device.
1381 +static int vc_sm_cma_open(struct inode *inode, struct file *file)
1383 + /* Make sure the device was started properly. */
1385 + pr_err("[%s]: invalid device\n", __func__);
1389 + file->private_data = vc_sm_cma_create_priv_data(current->tgid);
1390 + if (!file->private_data) {
1391 + pr_err("[%s]: failed to create data tracker\n", __func__);
1400 + * Close the vcsm-cma device.
1401 + * All allocations are file descriptors to the dmabuf objects, so we will get
1402 + * the clean up request on those as those are cleaned up.
1404 +static int vc_sm_cma_release(struct inode *inode, struct file *file)
1406 + struct vc_sm_privdata_t *file_data =
1407 + (struct vc_sm_privdata_t *)file->private_data;
1410 + /* Make sure the device was started properly. */
1411 + if (!sm_state || !file_data) {
1412 + pr_err("[%s]: invalid device\n", __func__);
1417 + pr_debug("[%s]: using private data %p\n", __func__, file_data);
1419 + /* Terminate the private data. */
1427 + * Allocate a shared memory handle and block.
1428 + * Allocation is from CMA, and then imported into the VPU mappings.
1430 +int vc_sm_cma_ioctl_alloc(struct vc_sm_privdata_t *private,
1431 + struct vc_sm_cma_ioctl_alloc *ioparam)
1433 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1434 + struct vc_sm_buffer *buffer = NULL;
1435 + struct vc_sm_import import = { 0 };
1436 + struct vc_sm_import_result result = { 0 };
1437 + struct dma_buf *dmabuf = NULL;
1438 + struct sg_table *sgt;
1444 + aligned_size = PAGE_ALIGN(ioparam->size);
1446 + if (!aligned_size)
1449 + /* Allocate local buffer to track this allocation. */
1450 + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1456 + buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
1458 + &buffer->dma_addr,
1460 + if (!buffer->cookie) {
1461 + pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
1462 + __func__, aligned_size);
1467 + import.type = VC_SM_ALLOC_NON_CACHED;
1468 + import.allocator = current->tgid;
1470 + if (*ioparam->name)
1471 + memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
1473 + memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
1474 + sizeof(VC_SM_RESOURCE_NAME_DEFAULT));
1476 + mutex_init(&buffer->lock);
1477 + INIT_LIST_HEAD(&buffer->attachments);
1478 + memcpy(buffer->name, import.name,
1479 + min(sizeof(buffer->name), sizeof(import.name) - 1));
1481 + exp_info.ops = &dma_buf_ops;
1482 + exp_info.size = aligned_size;
1483 + exp_info.flags = O_RDWR;
1484 + exp_info.priv = buffer;
1486 + dmabuf = dma_buf_export(&exp_info);
1487 + if (IS_ERR(dmabuf)) {
1488 + ret = PTR_ERR(dmabuf);
1491 + buffer->dma_buf = dmabuf;
1493 + import.addr = buffer->dma_addr;
1494 + import.size = aligned_size;
1495 + import.kernel_id = get_kernel_id(buffer);
1497 + /* Wrap it into a videocore buffer. */
1498 + status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
1499 + &sm_state->int_trans_id);
1500 + if (status == -EINTR) {
1501 + pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
1502 + __func__, sm_state->int_trans_id);
1503 + ret = -ERESTARTSYS;
1504 + private->restart_sys = -EINTR;
1505 + private->int_action = VC_SM_MSG_TYPE_IMPORT;
1507 + } else if (status || !result.res_handle) {
1508 + pr_err("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
1509 + __func__, status, sm_state->int_trans_id);
1514 + /* Keep track of the buffer we created. */
1515 + buffer->private = private;
1516 + buffer->vc_handle = result.res_handle;
1517 + buffer->size = import.size;
1518 + buffer->vpu_state = VPU_MAPPED;
1519 + buffer->kernel_id = import.kernel_id;
1521 + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
1527 + ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
1528 + buffer->dma_addr, buffer->size);
1530 + /* FIXME: error handling */
1531 + pr_err("failed to get scatterlist from DMA API\n");
1536 + buffer->alloc.sg_table = sgt;
1538 + fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1542 + vc_sm_add_resource(private, buffer);
1544 + pr_debug("[%s]: Added resource as fd %d, buffer %p, private %p, dma_addr %pad\n",
1545 + __func__, fd, buffer, private, &buffer->dma_addr);
1548 + ioparam->handle = fd;
1549 + ioparam->vc_handle = buffer->vc_handle;
1550 + ioparam->dma_addr = buffer->dma_addr;
1554 + pr_err("[%s]: something failed - cleanup. ret %d\n", __func__, ret);
1557 + /* dmabuf has been exported, therefore allow dmabuf cleanup to
1560 + dma_buf_put(dmabuf);
1562 + /* No dmabuf, therefore just free the buffer here */
1563 + if (buffer->cookie)
1564 + dma_free_coherent(&sm_state->pdev->dev, buffer->size,
1565 + buffer->cookie, buffer->dma_addr);
1571 +#ifndef CONFIG_ARM64
1572 +/* Converts a VC_SM_CACHE_OP_* value to the corresponding cache-operation function. */
1573 +static void (*cache_op_to_func(const unsigned int cache_op))
1574 + (const void*, const void*)
1576 + switch (cache_op) {
1577 + case VC_SM_CACHE_OP_NOP:
1580 + case VC_SM_CACHE_OP_INV:
1581 + return dmac_inv_range;
1583 + case VC_SM_CACHE_OP_CLEAN:
1584 + return dmac_clean_range;
1586 + case VC_SM_CACHE_OP_FLUSH:
1587 + return dmac_flush_range;
1590 + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
1596 + * Clean/invalidate/flush the cache for a buffer that is already pinned (i.e. accessed).
1598 +static int clean_invalid_contig_2d(const void __user *addr,
1599 + const size_t block_count,
1600 + const size_t block_size,
1601 + const size_t stride,
1602 + const unsigned int cache_op)
1605 + void (*op_fn)(const void *start, const void *end);
1607 + if (!block_size) {
1608 + pr_err("[%s]: size cannot be 0\n", __func__);
1612 + op_fn = cache_op_to_func(cache_op);
1616 + for (i = 0; i < block_count; i ++, addr += stride)
1617 + op_fn(addr, addr + block_size);
1622 +static int vc_sm_cma_clean_invalid2(unsigned int cmdnr, unsigned long arg)
1624 + struct vc_sm_cma_ioctl_clean_invalid2 ioparam;
1625 + struct vc_sm_cma_ioctl_clean_invalid_block *block = NULL;
1628 + /* Get parameter data. */
1629 + if (copy_from_user(&ioparam, (void *)arg, sizeof(ioparam))) {
1630 + pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
1634 + block = kmalloc(ioparam.op_count * sizeof(*block), GFP_KERNEL);
1638 + if (copy_from_user(block, (void *)(arg + sizeof(ioparam)),
1639 + ioparam.op_count * sizeof(*block)) != 0) {
1640 + pr_err("[%s]: failed to copy-from-user payload for cmd %x\n",
1646 + for (i = 0; i < ioparam.op_count; i++) {
1647 + const struct vc_sm_cma_ioctl_clean_invalid_block * const op =
1650 + if (op->invalidate_mode == VC_SM_CACHE_OP_NOP)
1653 + ret = clean_invalid_contig_2d((void __user *)op->start_address,
1654 + op->block_count, op->block_size,
1655 + op->inter_block_stride,
1656 + op->invalidate_mode);
1667 +static long vc_sm_cma_ioctl(struct file *file, unsigned int cmd,
1668 + unsigned long arg)
1671 + unsigned int cmdnr = _IOC_NR(cmd);
1672 + struct vc_sm_privdata_t *file_data =
1673 + (struct vc_sm_privdata_t *)file->private_data;
1675 + /* Validate we can work with this device. */
1676 + if (!sm_state || !file_data) {
1677 + pr_err("[%s]: invalid device\n", __func__);
1681 + /* Action is a re-post of a previously interrupted action? */
1682 + if (file_data->restart_sys == -EINTR) {
1683 + struct vc_sm_action_clean_t action_clean;
1685 + pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
1686 + __func__, file_data->int_action,
1687 + file_data->int_trans_id);
1689 + action_clean.res_action = file_data->int_action;
1690 + action_clean.action_trans_id = file_data->int_trans_id;
1692 + file_data->restart_sys = 0;
1695 + /* Now process the command. */
1697 + /* New memory allocation.
1699 + case VC_SM_CMA_CMD_ALLOC:
1701 + struct vc_sm_cma_ioctl_alloc ioparam;
1703 + /* Get the parameter data. */
1704 + if (copy_from_user
1705 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
1706 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
1712 + ret = vc_sm_cma_ioctl_alloc(file_data, &ioparam);
1714 + (copy_to_user((void *)arg, &ioparam,
1715 + sizeof(ioparam)) != 0)) {
1716 + /* FIXME: Release allocation */
1717 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
1724 + case VC_SM_CMA_CMD_IMPORT_DMABUF:
1726 + struct vc_sm_cma_ioctl_import_dmabuf ioparam;
1727 + struct dma_buf *new_dmabuf;
1729 + /* Get the parameter data. */
1730 + if (copy_from_user
1731 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
1732 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
1738 + ret = vc_sm_cma_import_dmabuf_internal(file_data,
1740 + ioparam.dmabuf_fd,
1744 + struct vc_sm_buffer *buf = new_dmabuf->priv;
1746 + ioparam.size = buf->size;
1747 + ioparam.handle = dma_buf_fd(new_dmabuf,
1749 + ioparam.vc_handle = buf->vc_handle;
1750 + ioparam.dma_addr = buf->dma_addr;
1752 + if (ioparam.handle < 0 ||
1753 + (copy_to_user((void *)arg, &ioparam,
1754 + sizeof(ioparam)) != 0)) {
1755 + dma_buf_put(new_dmabuf);
1756 + /* FIXME: Release allocation */
1763 +#ifndef CONFIG_ARM64
1765 + * Flush/Invalidate the cache for a given mapping.
1766 + * Blocks must be pinned (i.e. accessed) before this call.
1768 + case VC_SM_CMA_CMD_CLEAN_INVALID2:
1769 + ret = vc_sm_cma_clean_invalid2(cmdnr, arg);
1774 + pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
1775 + current->tgid, file_data->pid);
1784 +#ifdef CONFIG_COMPAT
1785 +struct vc_sm_cma_ioctl_clean_invalid2_32 {
1787 + struct vc_sm_cma_ioctl_clean_invalid_block_32 {
1788 + u16 invalidate_mode;
1790 + compat_uptr_t start_address;
1792 + u32 inter_block_stride;
1796 +#define VC_SM_CMA_CMD_CLEAN_INVALID2_32\
1797 + _IOR(VC_SM_CMA_MAGIC_TYPE, VC_SM_CMA_CMD_CLEAN_INVALID2,\
1798 + struct vc_sm_cma_ioctl_clean_invalid2_32)
1800 +static long vc_sm_cma_compat_ioctl(struct file *file, unsigned int cmd,
1801 + unsigned long arg)
1804 + case VC_SM_CMA_CMD_CLEAN_INVALID2_32:
1809 + return vc_sm_cma_ioctl(file, cmd, arg);
1814 +/* Device operations that we managed in this driver. */
1815 +static const struct file_operations vc_sm_ops = {
1816 + .owner = THIS_MODULE,
1817 + .unlocked_ioctl = vc_sm_cma_ioctl,
1818 +#ifdef CONFIG_COMPAT
1819 + .compat_ioctl = vc_sm_cma_compat_ioctl,
1821 + .open = vc_sm_cma_open,
1822 + .release = vc_sm_cma_release,
1825 +/* Driver load/unload functions */
1826 +/* Videocore connected. */
1827 +static void vc_sm_connected_init(void)
1830 + VCHI_INSTANCE_T vchi_instance;
1831 + struct vc_sm_version version;
1832 + struct vc_sm_result_t version_result;
1834 + pr_info("[%s]: start\n", __func__);
1837 + * Initialize and create a VCHI connection for the shared memory service
1838 + * running on videocore.
1840 + ret = vchi_initialise(&vchi_instance);
1842 + pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
1848 + ret = vchi_connect(vchi_instance);
1850 + pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
1856 + /* Initialize an instance of the shared memory service. */
1857 + sm_state->sm_handle = vc_sm_cma_vchi_init(vchi_instance, 1,
1859 + if (!sm_state->sm_handle) {
1860 + pr_err("[%s]: failed to initialize shared memory service\n",
1866 + /* Create a debug fs directory entry (root). */
1867 + sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
1869 + sm_state->dir_state.show = &vc_sm_cma_global_state_show;
1870 + sm_state->dir_state.dir_entry =
1871 + debugfs_create_file(VC_SM_STATE, 0444, sm_state->dir_root,
1872 + &sm_state->dir_state,
1873 + &vc_sm_cma_debug_fs_fops);
1875 + INIT_LIST_HEAD(&sm_state->buffer_list);
1877 + /* Create a shared memory device. */
1878 + sm_state->misc_dev.minor = MISC_DYNAMIC_MINOR;
1879 + sm_state->misc_dev.name = DEVICE_NAME;
1880 + sm_state->misc_dev.fops = &vc_sm_ops;
1881 + sm_state->misc_dev.parent = NULL;
1882 + /* Temporarily set as 666 until udev rules have been sorted */
1883 + sm_state->misc_dev.mode = 0666;
1884 + ret = misc_register(&sm_state->misc_dev);
1886 + pr_err("vcsm-cma: failed to register misc device.\n");
1887 + goto err_remove_debugfs;
1890 + sm_state->data_knl = vc_sm_cma_create_priv_data(0);
1891 + if (!sm_state->data_knl) {
1892 + pr_err("[%s]: failed to create kernel private data tracker\n",
1894 + goto err_remove_misc_dev;
1897 + version.version = 2;
1898 + ret = vc_sm_cma_vchi_client_version(sm_state->sm_handle, &version,
1900 + &sm_state->int_trans_id);
1902 + pr_err("[%s]: Failed to send version request %d\n", __func__,
1908 + pr_info("[%s]: installed successfully\n", __func__);
1911 +err_remove_misc_dev:
1912 + misc_deregister(&sm_state->misc_dev);
1913 +err_remove_debugfs:
1914 + debugfs_remove_recursive(sm_state->dir_root);
1915 + vc_sm_cma_vchi_stop(&sm_state->sm_handle);
1918 +/* Driver loading. */
1919 +static int bcm2835_vc_sm_cma_probe(struct platform_device *pdev)
1921 + pr_info("%s: Videocore shared memory driver\n", __func__);
1923 + sm_state = devm_kzalloc(&pdev->dev, sizeof(*sm_state), GFP_KERNEL);
1926 + sm_state->pdev = pdev;
1927 + mutex_init(&sm_state->map_lock);
1929 + spin_lock_init(&sm_state->kernelid_map_lock);
1930 + idr_init_base(&sm_state->kernelid_map, 1);
1932 + pdev->dev.dma_parms = devm_kzalloc(&pdev->dev,
1933 + sizeof(*pdev->dev.dma_parms),
1935 + /* dma_set_max_seg_size checks if dma_parms is NULL. */
1936 + dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
1938 + vchiq_add_connected_callback(vc_sm_connected_init);
1942 +/* Driver unloading. */
1943 +static int bcm2835_vc_sm_cma_remove(struct platform_device *pdev)
1945 + pr_debug("[%s]: start\n", __func__);
1947 + misc_deregister(&sm_state->misc_dev);
1949 + /* Remove all proc entries. */
1950 + debugfs_remove_recursive(sm_state->dir_root);
1952 + /* Stop the videocore shared memory service. */
1953 + vc_sm_cma_vchi_stop(&sm_state->sm_handle);
1957 + idr_destroy(&sm_state->kernelid_map);
1959 + /* Free the memory for the state structure. */
1960 + mutex_destroy(&sm_state->map_lock);
1963 + pr_debug("[%s]: end\n", __func__);
1967 +/* Kernel API calls */
1968 +/* Get an internal resource handle mapped from the external one. */
1969 +int vc_sm_cma_int_handle(void *handle)
1971 + struct dma_buf *dma_buf = (struct dma_buf *)handle;
1972 + struct vc_sm_buffer *buf;
1974 + /* Validate we can work with this device. */
1975 + if (!sm_state || !handle) {
1976 + pr_err("[%s]: invalid input\n", __func__);
1980 + buf = (struct vc_sm_buffer *)dma_buf->priv;
1981 + return buf->vc_handle;
1983 +EXPORT_SYMBOL_GPL(vc_sm_cma_int_handle);
1985 +/* Free a previously allocated shared memory handle and block. */
1986 +int vc_sm_cma_free(void *handle)
1988 + struct dma_buf *dma_buf = (struct dma_buf *)handle;
1990 + /* Validate we can work with this device. */
1991 + if (!sm_state || !handle) {
1992 + pr_err("[%s]: invalid input\n", __func__);
1996 + pr_debug("%s: handle %p/dmabuf %p\n", __func__, handle, dma_buf);
1998 + dma_buf_put(dma_buf);
2002 +EXPORT_SYMBOL_GPL(vc_sm_cma_free);
2004 +/* Import a dmabuf to be shared with VC. */
2005 +int vc_sm_cma_import_dmabuf(struct dma_buf *src_dmabuf, void **handle)
2007 + struct dma_buf *new_dma_buf;
2008 + struct vc_sm_buffer *buf;
2011 + /* Validate we can work with this device. */
2012 + if (!sm_state || !src_dmabuf || !handle) {
2013 + pr_err("[%s]: invalid input\n", __func__);
2017 + ret = vc_sm_cma_import_dmabuf_internal(sm_state->data_knl, src_dmabuf,
2018 + -1, &new_dma_buf);
2021 + pr_debug("%s: imported to ptr %p\n", __func__, new_dma_buf);
2022 + buf = (struct vc_sm_buffer *)new_dma_buf->priv;
2024 + /* Assign valid handle at this time.*/
2025 + *handle = new_dma_buf;
2028 + * succeeded in importing the dma_buf, but then
2029 + * failed to look it up again. How?
2030 + * Release the fd again.
2032 + pr_err("%s: imported vc_sm_cma_get_buffer failed %d\n",
2038 +EXPORT_SYMBOL_GPL(vc_sm_cma_import_dmabuf);
2040 +static struct platform_driver bcm2835_vcsm_cma_driver = {
2041 + .probe = bcm2835_vc_sm_cma_probe,
2042 + .remove = bcm2835_vc_sm_cma_remove,
2044 + .name = DEVICE_NAME,
2045 + .owner = THIS_MODULE,
2049 +module_platform_driver(bcm2835_vcsm_cma_driver);
2051 +MODULE_AUTHOR("Dave Stevenson");
2052 +MODULE_DESCRIPTION("VideoCore CMA Shared Memory Driver");
2053 +MODULE_LICENSE("GPL v2");
2054 +MODULE_ALIAS("platform:vcsm-cma");
2056 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
2058 +/* SPDX-License-Identifier: GPL-2.0 */
2061 + * VideoCore Shared Memory driver using CMA.
2063 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
2070 +#include <linux/device.h>
2071 +#include <linux/dma-direction.h>
2072 +#include <linux/kref.h>
2073 +#include <linux/mm_types.h>
2074 +#include <linux/mutex.h>
2075 +#include <linux/rbtree.h>
2076 +#include <linux/sched.h>
2077 +#include <linux/shrinker.h>
2078 +#include <linux/types.h>
2079 +#include <linux/miscdevice.h>
2081 +#define VC_SM_MAX_NAME_LEN 32
2083 +enum vc_sm_vpu_mapping_state {
2089 +struct vc_sm_alloc_data {
2090 + unsigned long num_pages;
2092 + struct sg_table *sg_table;
2095 +struct vc_sm_imported {
2096 + struct dma_buf *dma_buf;
2097 + struct dma_buf_attachment *attach;
2098 + struct sg_table *sgt;
2101 +struct vc_sm_buffer {
2102 + struct list_head global_buffer_list; /* Global list of buffers. */
2104 + /* Index in the kernel_id idr so that we can find the
2105 + * mmal_msg_context again when servicing the VCHI reply.
2111 + /* Lock over all the following state for this buffer */
2112 + struct mutex lock;
2113 + struct list_head attachments;
2115 + char name[VC_SM_MAX_NAME_LEN];
2117 + int in_use:1; /* Kernel is still using this resource */
2118 + int imported:1; /* Imported dmabuf */
2120 + enum vc_sm_vpu_mapping_state vpu_state;
2121 + u32 vc_handle; /* VideoCore handle for this buffer */
2122 + int vpu_allocated; /*
2123 + * The VPU made this allocation. Release the
2124 + * local dma_buf when the VPU releases the
2128 + /* DMABUF related fields */
2129 + struct dma_buf *dma_buf;
2130 + dma_addr_t dma_addr;
2133 + struct vc_sm_privdata_t *private;
2136 + struct vc_sm_alloc_data alloc;
2137 + struct vc_sm_imported import;
2143 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
2145 +// SPDX-License-Identifier: GPL-2.0
2147 + * VideoCore Shared Memory CMA allocator
2149 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
2150 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
2152 + * Based on vmcs_sm driver from Broadcom Corporation.
2156 +/* ---- Include Files ----------------------------------------------------- */
2157 +#include <linux/completion.h>
2158 +#include <linux/kernel.h>
2159 +#include <linux/kthread.h>
2160 +#include <linux/list.h>
2161 +#include <linux/mutex.h>
2162 +#include <linux/semaphore.h>
2163 +#include <linux/slab.h>
2164 +#include <linux/types.h>
2166 +#include "vc_sm_cma_vchi.h"
2168 +#define VC_SM_VER 1
2169 +#define VC_SM_MIN_VER 0
2171 +/* ---- Private Constants and Types -------------------------------------- */
2173 +/* Command blocks come from a pool */
2174 +#define SM_MAX_NUM_CMD_RSP_BLKS 32
2176 +struct sm_cmd_rsp_blk {
2177 + struct list_head head; /* To create lists */
2178 + /* To be signaled when the response is there */
2179 + struct completion cmplt;
2184 + u8 msg[VC_SM_MAX_MSG_LEN];
2192 +struct sm_instance {
2193 + u32 num_connections;
2194 + VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
2195 + struct task_struct *io_thread;
2196 + struct completion io_cmplt;
2198 + vpu_event_cb vpu_event;
2200 + /* Mutex over the following lists */
2201 + struct mutex lock;
2203 + struct list_head cmd_list;
2204 + struct list_head rsp_list;
2205 + struct list_head dead_list;
2207 + struct sm_cmd_rsp_blk free_blk[SM_MAX_NUM_CMD_RSP_BLKS];
2209 + /* Mutex over the free_list */
2210 + struct mutex free_lock;
2211 + struct list_head free_list;
2213 + struct semaphore free_sema;
2217 +/* ---- Private Variables ------------------------------------------------ */
2219 +/* ---- Private Function Prototypes -------------------------------------- */
2221 +/* ---- Private Functions ------------------------------------------------ */
2223 +bcm2835_vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
2225 + unsigned int size)
2227 + return vchi_queue_kernel_message(handle,
2233 +sm_cmd_rsp_blk *vc_vchi_cmd_create(struct sm_instance *instance,
2234 + enum vc_sm_msg_type id, void *msg,
2235 + u32 size, int wait)
2237 + struct sm_cmd_rsp_blk *blk;
2238 + struct vc_sm_msg_hdr_t *hdr;
2240 + if (down_interruptible(&instance->free_sema)) {
2241 + blk = kmalloc(sizeof(*blk), GFP_KERNEL);
2246 + init_completion(&blk->cmplt);
2248 + mutex_lock(&instance->free_lock);
2250 + list_first_entry(&instance->free_list,
2251 + struct sm_cmd_rsp_blk, head);
2252 + list_del(&blk->head);
2253 + mutex_unlock(&instance->free_lock);
2258 + blk->length = sizeof(*hdr) + size;
2260 + hdr = (struct vc_sm_msg_hdr_t *)blk->msg;
2262 + mutex_lock(&instance->lock);
2263 + instance->trans_id++;
2265 + * Retain the top bit for identifying asynchronous events, or VPU cmds.
2267 + instance->trans_id &= ~0x80000000;
2268 + hdr->trans_id = instance->trans_id;
2269 + blk->id = instance->trans_id;
2270 + mutex_unlock(&instance->lock);
2273 + memcpy(hdr->body, msg, size);
2279 +vc_vchi_cmd_delete(struct sm_instance *instance, struct sm_cmd_rsp_blk *blk)
2286 + mutex_lock(&instance->free_lock);
2287 + list_add(&blk->head, &instance->free_list);
2288 + mutex_unlock(&instance->free_lock);
2289 + up(&instance->free_sema);
2292 +static void vc_sm_cma_vchi_rx_ack(struct sm_instance *instance,
2293 + struct sm_cmd_rsp_blk *cmd,
2294 + struct vc_sm_result_t *reply,
2297 + mutex_lock(&instance->lock);
2298 + list_for_each_entry(cmd,
2299 + &instance->rsp_list,
2301 + if (cmd->id == reply->trans_id)
2304 + mutex_unlock(&instance->lock);
2306 + if (&cmd->head == &instance->rsp_list) {
2307 + //pr_debug("%s: received response %u, throw away...",
2308 + pr_err("%s: received response %u, throw away...",
2311 + } else if (reply_len > sizeof(cmd->msg)) {
2312 + pr_err("%s: reply too big (%u) %u, throw away...",
2313 + __func__, reply_len,
2316 + memcpy(cmd->msg, reply,
2318 + complete(&cmd->cmplt);
2322 +static int vc_sm_cma_vchi_videocore_io(void *arg)
2324 + struct sm_instance *instance = arg;
2325 + struct sm_cmd_rsp_blk *cmd = NULL, *cmd_tmp;
2326 + struct vc_sm_result_t *reply;
2333 + vchi_service_release(instance->vchi_handle[0]);
2336 + if (wait_for_completion_interruptible(&instance->io_cmplt))
2339 + vchi_service_use(instance->vchi_handle[0]);
2344 + * Get new command and move it to response list
2346 + mutex_lock(&instance->lock);
2347 + if (list_empty(&instance->cmd_list)) {
2348 + /* no more commands to process */
2349 + mutex_unlock(&instance->lock);
2352 + cmd = list_first_entry(&instance->cmd_list,
2353 + struct sm_cmd_rsp_blk, head);
2354 + list_move(&cmd->head, &instance->rsp_list);
2356 + mutex_unlock(&instance->lock);
2358 + /* Send the command */
2360 + bcm2835_vchi_msg_queue(instance->vchi_handle[0],
2361 + cmd->msg, cmd->length);
2363 + pr_err("%s: failed to queue message (%d)",
2364 + __func__, status);
2367 + /* If no reply is needed then we're done */
2369 + mutex_lock(&instance->lock);
2370 + list_del(&cmd->head);
2371 + mutex_unlock(&instance->lock);
2372 + vc_vchi_cmd_delete(instance, cmd);
2377 + complete(&cmd->cmplt);
2383 + while (!vchi_msg_peek(instance->vchi_handle[0], (void **)&reply,
2384 + &reply_len, VCHI_FLAGS_NONE)) {
2385 + if (reply->trans_id & 0x80000000) {
2386 + /* Async event or cmd from the VPU */
2387 + if (instance->vpu_event)
2388 + instance->vpu_event(instance, reply,
2391 + vc_sm_cma_vchi_rx_ack(instance, cmd, reply,
2395 + vchi_msg_remove(instance->vchi_handle[0]);
2398 + /* Go through the dead list and free them */
2399 + mutex_lock(&instance->lock);
2400 + list_for_each_entry_safe(cmd, cmd_tmp, &instance->dead_list,
2402 + list_del(&cmd->head);
2403 + vc_vchi_cmd_delete(instance, cmd);
2405 + mutex_unlock(&instance->lock);
2411 +static void vc_sm_cma_vchi_callback(void *param,
2412 + const VCHI_CALLBACK_REASON_T reason,
2415 + struct sm_instance *instance = param;
2420 + case VCHI_CALLBACK_MSG_AVAILABLE:
2421 + complete(&instance->io_cmplt);
2424 + case VCHI_CALLBACK_SERVICE_CLOSED:
2425 + pr_info("%s: service CLOSED!!", __func__);
2431 +struct sm_instance *vc_sm_cma_vchi_init(VCHI_INSTANCE_T vchi_instance,
2432 + unsigned int num_connections,
2433 + vpu_event_cb vpu_event)
2436 + struct sm_instance *instance;
2439 + pr_debug("%s: start", __func__);
2441 + if (num_connections > VCHI_MAX_NUM_CONNECTIONS) {
2442 + pr_err("%s: unsupported number of connections %u (max=%u)",
2443 + __func__, num_connections, VCHI_MAX_NUM_CONNECTIONS);
2447 + /* Allocate memory for this instance */
2448 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
2450 + /* Misc initialisations */
2451 + mutex_init(&instance->lock);
2452 + init_completion(&instance->io_cmplt);
2453 + INIT_LIST_HEAD(&instance->cmd_list);
2454 + INIT_LIST_HEAD(&instance->rsp_list);
2455 + INIT_LIST_HEAD(&instance->dead_list);
2456 + INIT_LIST_HEAD(&instance->free_list);
2457 + sema_init(&instance->free_sema, SM_MAX_NUM_CMD_RSP_BLKS);
2458 + mutex_init(&instance->free_lock);
2459 + for (i = 0; i < SM_MAX_NUM_CMD_RSP_BLKS; i++) {
2460 + init_completion(&instance->free_blk[i].cmplt);
2461 + list_add(&instance->free_blk[i].head, &instance->free_list);
2464 + /* Open the VCHI service connections */
2465 + instance->num_connections = num_connections;
2466 + for (i = 0; i < num_connections; i++) {
2467 + struct service_creation params = {
2468 + .version = VCHI_VERSION_EX(VC_SM_VER, VC_SM_MIN_VER),
2469 + .service_id = VC_SM_SERVER_NAME,
2470 + .callback = vc_sm_cma_vchi_callback,
2471 + .callback_param = instance,
2474 + status = vchi_service_open(vchi_instance,
2475 + ¶ms, &instance->vchi_handle[i]);
2477 + pr_err("%s: failed to open VCHI service (%d)",
2478 + __func__, status);
2480 + goto err_close_services;
2484 + /* Create the thread which takes care of all io to/from videocore. */
2485 + instance->io_thread = kthread_create(&vc_sm_cma_vchi_videocore_io,
2486 + (void *)instance, "SMIO");
2487 + if (!instance->io_thread) {
2488 + pr_err("%s: failed to create SMIO thread", __func__);
2490 + goto err_close_services;
2492 + instance->vpu_event = vpu_event;
2493 + set_user_nice(instance->io_thread, -10);
2494 + wake_up_process(instance->io_thread);
2496 + pr_debug("%s: success - instance %p", __func__, instance);
2499 +err_close_services:
2500 + for (i = 0; i < instance->num_connections; i++) {
2501 + if (instance->vchi_handle[i])
2502 + vchi_service_close(instance->vchi_handle[i]);
2506 + pr_debug("%s: FAILED", __func__);
2510 +int vc_sm_cma_vchi_stop(struct sm_instance **handle)
2512 + struct sm_instance *instance;
2516 + pr_err("%s: invalid pointer to handle %p", __func__, handle);
2521 + pr_err("%s: invalid handle %p", __func__, *handle);
2525 + instance = *handle;
2527 + /* Close all VCHI service connections */
2528 + for (i = 0; i < instance->num_connections; i++) {
2531 + vchi_service_use(instance->vchi_handle[i]);
2533 + success = vchi_service_close(instance->vchi_handle[i]);
2545 +static int vc_sm_cma_vchi_send_msg(struct sm_instance *handle,
2546 + enum vc_sm_msg_type msg_id, void *msg,
2547 + u32 msg_size, void *result, u32 result_size,
2548 + u32 *cur_trans_id, u8 wait_reply)
2551 + struct sm_instance *instance = handle;
2552 + struct sm_cmd_rsp_blk *cmd_blk;
2555 + pr_err("%s: invalid handle", __func__);
2559 + pr_err("%s: invalid msg pointer", __func__);
2564 + vc_vchi_cmd_create(instance, msg_id, msg, msg_size, wait_reply);
2566 + pr_err("[%s]: failed to allocate global tracking resource",
2572 + *cur_trans_id = cmd_blk->id;
2574 + mutex_lock(&instance->lock);
2575 + list_add_tail(&cmd_blk->head, &instance->cmd_list);
2576 + mutex_unlock(&instance->lock);
2577 + complete(&instance->io_cmplt);
2583 + /* Wait for the response */
2584 + if (wait_for_completion_interruptible(&cmd_blk->cmplt)) {
2585 + mutex_lock(&instance->lock);
2586 + if (!cmd_blk->sent) {
2587 + list_del(&cmd_blk->head);
2588 + mutex_unlock(&instance->lock);
2589 + vc_vchi_cmd_delete(instance, cmd_blk);
2593 + list_move(&cmd_blk->head, &instance->dead_list);
2594 + mutex_unlock(&instance->lock);
2595 + complete(&instance->io_cmplt);
2596 + return -EINTR; /* We're done */
2599 + if (result && result_size) {
2600 + memcpy(result, cmd_blk->msg, result_size);
2602 + struct vc_sm_result_t *res =
2603 + (struct vc_sm_result_t *)cmd_blk->msg;
2604 + status = (res->success == 0) ? 0 : -ENXIO;
2607 + mutex_lock(&instance->lock);
2608 + list_del(&cmd_blk->head);
2609 + mutex_unlock(&instance->lock);
2610 + vc_vchi_cmd_delete(instance, cmd_blk);
2614 +int vc_sm_cma_vchi_free(struct sm_instance *handle, struct vc_sm_free_t *msg,
2615 + u32 *cur_trans_id)
2617 + return vc_sm_cma_vchi_send_msg(handle, VC_SM_MSG_TYPE_FREE,
2618 + msg, sizeof(*msg), 0, 0, cur_trans_id, 0);
2621 +int vc_sm_cma_vchi_import(struct sm_instance *handle, struct vc_sm_import *msg,
2622 + struct vc_sm_import_result *result, u32 *cur_trans_id)
2624 + return vc_sm_cma_vchi_send_msg(handle, VC_SM_MSG_TYPE_IMPORT,
2625 + msg, sizeof(*msg), result, sizeof(*result),
2629 +int vc_sm_cma_vchi_client_version(struct sm_instance *handle,
2630 + struct vc_sm_version *msg,
2631 + struct vc_sm_result_t *result,
2632 + u32 *cur_trans_id)
2634 + return vc_sm_cma_vchi_send_msg(handle, VC_SM_MSG_TYPE_CLIENT_VERSION,
2635 + //msg, sizeof(*msg), result, sizeof(*result),
2636 + //cur_trans_id, 1);
2637 + msg, sizeof(*msg), NULL, 0,
2641 +int vc_sm_vchi_client_vc_mem_req_reply(struct sm_instance *handle,
2642 + struct vc_sm_vc_mem_request_result *msg,
2643 + uint32_t *cur_trans_id)
2645 + return vc_sm_cma_vchi_send_msg(handle,
2646 + VC_SM_MSG_TYPE_VC_MEM_REQUEST_REPLY,
2647 + msg, sizeof(*msg), 0, 0, cur_trans_id,
2651 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
2653 +/* SPDX-License-Identifier: GPL-2.0 */
2656 + * VideoCore Shared Memory CMA allocator
2658 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
2659 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
2661 + * Based on vmcs_sm driver from Broadcom Corporation.
2665 +#ifndef __VC_SM_CMA_VCHI_H__INCLUDED__
2666 +#define __VC_SM_CMA_VCHI_H__INCLUDED__
2668 +#include "interface/vchi/vchi.h"
2670 +#include "vc_sm_defs.h"
2673 + * Forward declare.
2675 +struct sm_instance;
2677 +typedef void (*vpu_event_cb)(struct sm_instance *instance,
2678 + struct vc_sm_result_t *reply, int reply_len);
2681 + * Initialize the shared memory service, opens up vchi connection to talk to it.
2683 +struct sm_instance *vc_sm_cma_vchi_init(VCHI_INSTANCE_T vchi_instance,
2684 + unsigned int num_connections,
2685 + vpu_event_cb vpu_event);
2688 + * Terminates the shared memory service.
2690 +int vc_sm_cma_vchi_stop(struct sm_instance **handle);
2693 + * Ask the shared memory service to free up some memory that was previously
2694 + * allocated by the vc_sm_cma_vchi_alloc function call.
2696 +int vc_sm_cma_vchi_free(struct sm_instance *handle, struct vc_sm_free_t *msg,
2697 + u32 *cur_trans_id);
2700 + * Import a contiguous block of memory and wrap it in a GPU MEM_HANDLE_T.
2702 +int vc_sm_cma_vchi_import(struct sm_instance *handle, struct vc_sm_import *msg,
2703 + struct vc_sm_import_result *result,
2704 + u32 *cur_trans_id);
2706 +int vc_sm_cma_vchi_client_version(struct sm_instance *handle,
2707 + struct vc_sm_version *msg,
2708 + struct vc_sm_result_t *result,
2709 + u32 *cur_trans_id);
2711 +int vc_sm_vchi_client_vc_mem_req_reply(struct sm_instance *handle,
2712 + struct vc_sm_vc_mem_request_result *msg,
2713 + uint32_t *cur_trans_id);
2715 +#endif /* __VC_SM_CMA_VCHI_H__INCLUDED__ */
2717 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
2719 +/* SPDX-License-Identifier: GPL-2.0 */
2722 + * VideoCore Shared Memory CMA allocator
2724 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
2726 + * Based on vc_sm_defs.h from the vmcs_sm driver Copyright Broadcom Corporation.
2727 + * All IPC messages are copied across to this file, even if the vc-sm-cma
2728 + * driver is not currently using them.
2730 + ****************************************************************************
2733 +#ifndef __VC_SM_DEFS_H__INCLUDED__
2734 +#define __VC_SM_DEFS_H__INCLUDED__
2736 +/* FourCC code used for VCHI connection */
2737 +#define VC_SM_SERVER_NAME MAKE_FOURCC("SMEM")
2739 +/* Maximum message length */
2740 +#define VC_SM_MAX_MSG_LEN (sizeof(union vc_sm_msg_union_t) + \
2741 + sizeof(struct vc_sm_msg_hdr_t))
2742 +#define VC_SM_MAX_RSP_LEN (sizeof(union vc_sm_msg_union_t))
2744 +/* Resource name maximum size */
2745 +#define VC_SM_RESOURCE_NAME 32
2748 + * Version to be reported to the VPU
2749 + * VPU assumes 0 (aka 1) which does not require the released callback, nor
2750 + * expect the client to handle VC_MEM_REQUESTS.
2751 + * Version 2 requires the released callback, and must support VC_MEM_REQUESTS.
2753 +#define VC_SM_PROTOCOL_VERSION 2
2755 +enum vc_sm_msg_type {
2756 + /* Message types supported for HOST->VC direction */
2758 + /* Allocate shared memory block */
2759 + VC_SM_MSG_TYPE_ALLOC,
2760 + /* Lock allocated shared memory block */
2761 + VC_SM_MSG_TYPE_LOCK,
2762 + /* Unlock allocated shared memory block */
2763 + VC_SM_MSG_TYPE_UNLOCK,
2764 + /* Unlock allocated shared memory block, do not answer command */
2765 + VC_SM_MSG_TYPE_UNLOCK_NOANS,
2766 + /* Free shared memory block */
2767 + VC_SM_MSG_TYPE_FREE,
2768 + /* Resize a shared memory block */
2769 + VC_SM_MSG_TYPE_RESIZE,
2770 + /* Walk the allocated shared memory block(s) */
2771 + VC_SM_MSG_TYPE_WALK_ALLOC,
2773 + /* A previously applied action will need to be reverted */
2774 + VC_SM_MSG_TYPE_ACTION_CLEAN,
2777 + * Import a physical address and wrap into a MEM_HANDLE_T.
2778 + * Release with VC_SM_MSG_TYPE_FREE.
2780 + VC_SM_MSG_TYPE_IMPORT,
2782 + * Tells VC the protocol version supported by this client.
2783 + * 2 supports the async/cmd messages from the VPU for final release
2784 + * of memory, and for VC allocations.
2786 + VC_SM_MSG_TYPE_CLIENT_VERSION,
2787 + /* Response to VC request for memory */
2788 + VC_SM_MSG_TYPE_VC_MEM_REQUEST_REPLY,
2791 + * Asynchronous/cmd messages supported for VC->HOST direction.
2792 + * Signalled by setting the top bit in vc_sm_result_t trans_id.
2796 + * VC has finished with an imported memory allocation.
2797 + * Release any Linux reference counts on the underlying block.
2799 + VC_SM_MSG_TYPE_RELEASED,
2800 + /* VC request for memory */
2801 + VC_SM_MSG_TYPE_VC_MEM_REQUEST,
2803 + VC_SM_MSG_TYPE_MAX
2806 +/* Type of memory to be allocated */
2807 +enum vc_sm_alloc_type_t {
2808 + VC_SM_ALLOC_CACHED,
2809 + VC_SM_ALLOC_NON_CACHED,
2812 +/* Message header for all messages in HOST->VC direction */
2813 +struct vc_sm_msg_hdr_t {
2820 +/* Request to allocate memory (HOST->VC) */
2821 +struct vc_sm_alloc_t {
2822 + /* type of memory to allocate */
2823 + enum vc_sm_alloc_type_t type;
2824 + /* byte amount of data to allocate per unit */
2826 + /* number of unit to allocate */
2828 + /* alignment to be applied on allocation */
2830 + /* identity of who allocated this block */
2832 + /* resource name (for easier tracking on vc side) */
2833 + char name[VC_SM_RESOURCE_NAME];
2837 +/* Result of a requested memory allocation (VC->HOST) */
2838 +struct vc_sm_alloc_result_t {
2839 + /* Transaction identifier */
2842 + /* Resource handle */
2844 + /* Pointer to resource buffer */
2846 + /* Resource base size (bytes) */
2847 + u32 res_base_size;
2848 + /* Resource number */
2853 +/* Request to free a previously allocated memory (HOST->VC) */
2854 +struct vc_sm_free_t {
2855 + /* Resource handle (returned from alloc) */
2857 + /* Resource buffer (returned from alloc) */
2862 +/* Request to lock a previously allocated memory (HOST->VC) */
2863 +struct vc_sm_lock_unlock_t {
2864 + /* Resource handle (returned from alloc) */
2866 + /* Resource buffer (returned from alloc) */
2871 +/* Request to resize a previously allocated memory (HOST->VC) */
2872 +struct vc_sm_resize_t {
2873 + /* Resource handle (returned from alloc) */
2875 + /* Resource buffer (returned from alloc) */
2877 + /* Resource *new* size requested (bytes) */
2882 +/* Result of a requested memory lock (VC->HOST) */
2883 +struct vc_sm_lock_result_t {
2884 + /* Transaction identifier */
2887 + /* Resource handle */
2889 + /* Pointer to resource buffer */
2892 + * Pointer to former resource buffer if the memory
2899 +/* Generic result for a request (VC->HOST) */
2900 +struct vc_sm_result_t {
2901 + /* Transaction identifier */
2908 +/* Request to revert a previously applied action (HOST->VC) */
2909 +struct vc_sm_action_clean_t {
2910 + /* Action of interest */
2911 + enum vc_sm_msg_type res_action;
2912 + /* Transaction identifier for the action of interest */
2913 + u32 action_trans_id;
2917 +/* Request to remove all data associated with a given allocator (HOST->VC) */
2918 +struct vc_sm_free_all_t {
2919 + /* Allocator identifier */
2923 +/* Request to import memory (HOST->VC) */
2924 +struct vc_sm_import {
2925 + /* type of memory to allocate */
2926 + enum vc_sm_alloc_type_t type;
2927 + /* pointer to the VC (ie physical) address of the allocated memory */
2929 + /* size of buffer */
2931 + /* opaque handle returned in RELEASED messages */
2933 + /* Allocator identifier */
2935 + /* resource name (for easier tracking on vc side) */
2936 + char name[VC_SM_RESOURCE_NAME];
2939 +/* Result of a requested memory import (VC->HOST) */
2940 +struct vc_sm_import_result {
2941 + /* Transaction identifier */
2944 + /* Resource handle */
2948 +/* Notification that VC has finished with an allocation (VC->HOST) */
2949 +struct vc_sm_released {
2950 + /* cmd type / trans_id */
2953 + /* pointer to the VC (ie physical) address of the allocated memory */
2955 + /* size of buffer */
2957 + /* opaque handle returned in RELEASED messages */
2963 + * Client informing VC as to the protocol version it supports.
2964 + * >=2 requires the released callback, and supports VC asking for memory.
2965 + * Failure means that the firmware doesn't support this call, and therefore the
2966 + * client should either fail, or NOT rely on getting the released callback.
2968 +struct vc_sm_version {
2972 +/* Request FROM VideoCore for some memory */
2973 +struct vc_sm_vc_mem_request {
2977 + /* trans_id (from VPU) */
2979 + /* size of buffer */
2981 + /* alignment of buffer */
2983 + /* resource name (for easier tracking) */
2984 + char name[VC_SM_RESOURCE_NAME];
2985 + /* VPU handle for the resource */
2989 +/* Response from the kernel to provide the VPU with some memory */
2990 +struct vc_sm_vc_mem_request_result {
2991 + /* Transaction identifier for the VPU */
2993 + /* pointer to the physical address of the allocated memory */
2995 + /* opaque handle returned in RELEASED messages */
2999 +/* Union of ALL messages */
3000 +union vc_sm_msg_union_t {
3001 + struct vc_sm_alloc_t alloc;
3002 + struct vc_sm_alloc_result_t alloc_result;
3003 + struct vc_sm_free_t free;
3004 + struct vc_sm_lock_unlock_t lock_unlock;
3005 + struct vc_sm_action_clean_t action_clean;
3006 + struct vc_sm_resize_t resize;
3007 + struct vc_sm_lock_result_t lock_result;
3008 + struct vc_sm_result_t result;
3009 + struct vc_sm_free_all_t free_all;
3010 + struct vc_sm_import import;
3011 + struct vc_sm_import_result import_result;
3012 + struct vc_sm_version version;
3013 + struct vc_sm_released released;
3014 + struct vc_sm_vc_mem_request vc_request;
3015 + struct vc_sm_vc_mem_request_result vc_request_result;
3018 +#endif /* __VC_SM_DEFS_H__INCLUDED__ */
3020 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_knl.h
3022 +/* SPDX-License-Identifier: GPL-2.0 */
3025 + * VideoCore Shared Memory CMA allocator
3027 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
3029 + * Based on vc_sm_defs.h from the vmcs_sm driver Copyright Broadcom Corporation.
3033 +#ifndef __VC_SM_KNL_H__INCLUDED__
3034 +#define __VC_SM_KNL_H__INCLUDED__
3036 +#if !defined(__KERNEL__)
3037 +#error "This interface is for kernel use only..."
3040 +/* Free a previously allocated or imported shared memory handle and block. */
3041 +int vc_sm_cma_free(void *handle);
3043 +/* Get an internal resource handle mapped from the external one. */
3044 +int vc_sm_cma_int_handle(void *handle);
3046 +/* Import a block of memory into the GPU space. */
3047 +int vc_sm_cma_import_dmabuf(struct dma_buf *dmabuf, void **handle);
3049 +#endif /* __VC_SM_KNL_H__INCLUDED__ */
3050 --- a/drivers/staging/vc04_services/vchiq-mmal/Makefile
3051 +++ b/drivers/staging/vc04_services/vchiq-mmal/Makefile
3052 @@ -4,5 +4,5 @@ bcm2835-mmal-vchiq-objs := mmal-vchiq.o
3053 obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += bcm2835-mmal-vchiq.o
3056 - -Idrivers/staging/vc04_services \
3057 + -I$(srctree)/drivers/staging/vc04_services \
3058 -D__VCCOREVER__=0x04000000
3060 +++ b/include/linux/broadcom/vc_sm_cma_ioctl.h
3062 +/* SPDX-License-Identifier: GPL-2.0 */
3065 + * Copyright 2019 Raspberry Pi (Trading) Ltd. All rights reserved.
3067 + * Based on vmcs_sm_ioctl.h Copyright Broadcom Corporation.
3070 +#ifndef __VC_SM_CMA_IOCTL_H
3071 +#define __VC_SM_CMA_IOCTL_H
3073 +/* ---- Include Files ---------------------------------------------------- */
3075 +#if defined(__KERNEL__)
3076 +#include <linux/types.h> /* Needed for standard types */
3078 +#include <stdint.h>
3081 +#include <linux/ioctl.h>
3083 +/* ---- Constants and Types ---------------------------------------------- */
3085 +#define VC_SM_CMA_RESOURCE_NAME 32
3086 +#define VC_SM_CMA_RESOURCE_NAME_DEFAULT "sm-host-resource"
3088 +/* Type define used to create unique IOCTL number */
3089 +#define VC_SM_CMA_MAGIC_TYPE 'J'
3091 +/* IOCTL commands on /dev/vc-sm-cma */
3092 +enum vc_sm_cma_cmd_e {
3093 + VC_SM_CMA_CMD_ALLOC = 0x5A, /* Start at 0x5A arbitrarily */
3095 + VC_SM_CMA_CMD_IMPORT_DMABUF,
3097 + VC_SM_CMA_CMD_CLEAN_INVALID2,
3099 + VC_SM_CMA_CMD_LAST /* Do not delete */
3102 +/* Cache type supported, conveniently matches the user space definition in
3105 +enum vc_sm_cma_cache_e {
3106 + VC_SM_CMA_CACHE_NONE,
3107 + VC_SM_CMA_CACHE_HOST,
3108 + VC_SM_CMA_CACHE_VC,
3109 + VC_SM_CMA_CACHE_BOTH,
3112 +/* IOCTL Data structures */
3113 +struct vc_sm_cma_ioctl_alloc {
3114 + /* user -> kernel */
3117 + __u32 cached; /* enum vc_sm_cma_cache_e */
3119 + __u8 name[VC_SM_CMA_RESOURCE_NAME];
3121 + /* kernel -> user */
3127 +struct vc_sm_cma_ioctl_import_dmabuf {
3128 + /* user -> kernel */
3130 + __u32 cached; /* enum vc_sm_cma_cache_e */
3131 + __u8 name[VC_SM_CMA_RESOURCE_NAME];
3133 + /* kernel -> user */
3142 + * Cache functions to be set to struct vc_sm_cma_ioctl_clean_invalid2
3143 + * invalidate_mode.
3145 +#define VC_SM_CACHE_OP_NOP 0x00
3146 +#define VC_SM_CACHE_OP_INV 0x01
3147 +#define VC_SM_CACHE_OP_CLEAN 0x02
3148 +#define VC_SM_CACHE_OP_FLUSH 0x03
3150 +struct vc_sm_cma_ioctl_clean_invalid2 {
3153 + struct vc_sm_cma_ioctl_clean_invalid_block {
3154 + __u32 invalidate_mode;
3155 + __u32 block_count;
3156 + void * __user start_address;
3158 + __u32 inter_block_stride;
3162 +/* IOCTL numbers */
3163 +#define VC_SM_CMA_IOCTL_MEM_ALLOC\
3164 + _IOR(VC_SM_CMA_MAGIC_TYPE, VC_SM_CMA_CMD_ALLOC,\
3165 + struct vc_sm_cma_ioctl_alloc)
3167 +#define VC_SM_CMA_IOCTL_MEM_IMPORT_DMABUF\
3168 + _IOR(VC_SM_CMA_MAGIC_TYPE, VC_SM_CMA_CMD_IMPORT_DMABUF,\
3169 + struct vc_sm_cma_ioctl_import_dmabuf)
3171 +#define VC_SM_CMA_IOCTL_MEM_CLEAN_INVALID2\
3172 + _IOR(VC_SM_CMA_MAGIC_TYPE, VC_SM_CMA_CMD_CLEAN_INVALID2,\
3173 + struct vc_sm_cma_ioctl_clean_invalid2)
3175 +#endif /* __VC_SM_CMA_IOCTL_H */