brcm2708: add linux 4.19 support
[oweals/openwrt.git] / target / linux / brcm2708 / patches-4.19 / 950-0630-staging-vc-sm-cma-Add-in-allocation-for-VPU-requests.patch
1 From 904c0d6a47b181b134a3626bfd93b456ec6b411d Mon Sep 17 00:00:00 2001
2 From: Dave Stevenson <dave.stevenson@raspberrypi.org>
3 Date: Fri, 21 Dec 2018 16:50:53 +0000
4 Subject: [PATCH 630/703] staging: vc-sm-cma: Add in allocation for VPU
5  requests.
6
7 The module has to change from tristate to bool, as the CMA allocator
8 functions are only available to built-in (bool) code.
9
10 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
11 ---
12  .../staging/vc04_services/vc-sm-cma/Kconfig   |   4 +-
13  .../staging/vc04_services/vc-sm-cma/Makefile  |   2 +-
14  .../staging/vc04_services/vc-sm-cma/vc_sm.c   | 642 +++++++++++++++---
15  .../staging/vc04_services/vc-sm-cma/vc_sm.h   |  30 +-
16  .../vc04_services/vc-sm-cma/vc_sm_cma.c       |  99 +++
17  .../vc04_services/vc-sm-cma/vc_sm_cma.h       |  39 ++
18  .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.c  |  10 +
19  .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.h  |   4 +
20  .../vc04_services/vc-sm-cma/vc_sm_defs.h      |   2 +
21  9 files changed, 723 insertions(+), 109 deletions(-)
22  create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.c
23  create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.h
24
25 --- a/drivers/staging/vc04_services/vc-sm-cma/Kconfig
26 +++ b/drivers/staging/vc04_services/vc-sm-cma/Kconfig
27 @@ -1,6 +1,6 @@
28  config BCM_VC_SM_CMA
29 -       tristate "VideoCore Shared Memory (CMA) driver"
30 -       depends on BCM2835_VCHIQ
31 +       bool "VideoCore Shared Memory (CMA) driver"
32 +       depends on BCM2835_VCHIQ && DMA_CMA
33         select RBTREE
34         select DMA_SHARED_BUFFER
35         help
36 --- a/drivers/staging/vc04_services/vc-sm-cma/Makefile
37 +++ b/drivers/staging/vc04_services/vc-sm-cma/Makefile
38 @@ -3,6 +3,6 @@ ccflags-y += -Idrivers/staging/vc04_serv
39  ccflags-y += -D__VCCOREVER__=0
40  
41  vc-sm-cma-$(CONFIG_BCM_VC_SM_CMA) := \
42 -       vc_sm.o vc_sm_cma_vchi.o
43 +       vc_sm.o vc_sm_cma_vchi.o vc_sm_cma.o
44  
45  obj-$(CONFIG_BCM_VC_SM_CMA) += vc-sm-cma.o
46 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
47 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
48 @@ -9,10 +9,21 @@
49   * and taking some code for CMA/dmabuf handling from the Android Ion
50   * driver (Google/Linaro).
51   *
52 - * This is cut down version to only support import of dma_bufs from
53 - * other kernel drivers. A more complete implementation of the old
54 - * vmcs_sm functionality can follow later.
55   *
56 + * This driver has 3 main uses:
57 + * 1) Allocating buffers for the kernel or userspace that can be shared with the
58 + *    VPU.
59 + * 2) Importing dmabufs from elsewhere for sharing with the VPU.
60 + * 3) Allocating buffers for use by the VPU.
61 + *
62 + * In the first and second cases the native handle is a dmabuf. Releasing the
63 + * resource inherently comes from releasing the dmabuf, and this will trigger
64 + * unmapping on the VPU. The underlying allocation and our buffer structure are
65 + * retained until the VPU has confirmed that it has finished with it.
66 + *
67 + * For the VPU allocations the VPU is responsible for triggering the release,
68 + * and therefore the released message decrements the dma_buf refcount (with the
69 + * VPU mapping having already been marked as released).
70   */
71  
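Illustration, not part of the patch: a sketch of how another kernel driver might drive the import path described in the comment above. The signatures of vc_sm_cma_import_dmabuf() and vc_sm_cma_int_handle() are taken from later in this patch; that vc_sm_cma_free() takes the same opaque handle, and that all three are declared in vc_sm_knl.h, are assumptions.

#include <linux/dma-buf.h>
#include "vc_sm_knl.h"	/* assumed to declare the vc_sm_cma_* kernel API */

static int share_buffer_with_vpu(struct dma_buf *dmabuf)
{
	void *handle;
	int vc_handle;
	int ret;

	/* Wraps the dmabuf in a new one and maps it on the VPU. */
	ret = vc_sm_cma_import_dmabuf(dmabuf, &handle);
	if (ret)
		return ret;

	/* VideoCore handle that VPU-side services understand. */
	vc_handle = vc_sm_cma_int_handle(handle);
	if (!vc_handle) {
		vc_sm_cma_free(handle);
		return -EINVAL;
	}

	/* ... pass vc_handle to the VPU ... */

	vc_sm_cma_free(handle);	/* drops the reference; VPU unmap follows */
	return 0;
}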
72  /* ---- Include Files ----------------------------------------------------- */
73 @@ -39,6 +50,7 @@
74  #include "vc_sm_cma_vchi.h"
75  
76  #include "vc_sm.h"
77 +#include "vc_sm_cma.h"
78  #include "vc_sm_knl.h"
79  
80  /* ---- Private Constants and Types --------------------------------------- */
81 @@ -72,6 +84,7 @@ struct sm_state_t {
82         struct platform_device *pdev;
83  
84         struct sm_instance *sm_handle;  /* Handle for videocore service. */
85 +       struct cma *cma_heap;
86  
87         spinlock_t kernelid_map_lock;   /* Spinlock protecting kernelid_map */
88         struct idr kernelid_map;
89 @@ -80,6 +93,7 @@ struct sm_state_t {
90         struct list_head buffer_list;   /* List of buffer. */
91  
92         struct vc_sm_privdata_t *data_knl;  /* Kernel internal data tracking. */
93 +       struct vc_sm_privdata_t *vpu_allocs; /* All allocations from the VPU */
94         struct dentry *dir_root;        /* Debug fs entries root. */
95         struct sm_pde_t dir_state;      /* Debug fs entries state sub-tree. */
96  
97 @@ -89,6 +103,12 @@ struct sm_state_t {
98         u32 int_trans_id;               /* Interrupted transaction. */
99  };
100  
101 +struct vc_sm_dma_buf_attachment {
102 +       struct device *dev;
103 +       struct sg_table *table;
104 +       struct list_head list;
105 +};
106 +
107  /* ---- Private Variables ----------------------------------------------- */
108  
109  static struct sm_state_t *sm_state;
110 @@ -172,12 +192,14 @@ static int vc_sm_cma_global_state_show(s
111                                    resource->size);
112                         seq_printf(s, "           DMABUF       %p\n",
113                                    resource->dma_buf);
114 -                       seq_printf(s, "           ATTACH       %p\n",
115 -                                  resource->attach);
116 +                       if (resource->imported) {
117 +                               seq_printf(s, "           ATTACH       %p\n",
118 +                                          resource->import.attach);
119 +                               seq_printf(s, "           SGT          %p\n",
120 +                                          resource->import.sgt);
121 +                       }
122                         seq_printf(s, "           SG_TABLE     %p\n",
123                                    resource->sg_table);
124 -                       seq_printf(s, "           SGT          %p\n",
125 -                                  resource->sgt);
126                         seq_printf(s, "           DMA_ADDR     %pad\n",
127                                    &resource->dma_addr);
128                         seq_printf(s, "           VC_HANDLE     %08x\n",
129 @@ -209,17 +231,33 @@ static void vc_sm_add_resource(struct vc
130  }
131  
132  /*
133 - * Release an allocation.
134 - * All refcounting is done via the dma buf object.
135 + * Cleans up imported dmabuf.
136   */
137 -static void vc_sm_release_resource(struct vc_sm_buffer *buffer, int force)
138 +static void vc_sm_clean_up_dmabuf(struct vc_sm_buffer *buffer)
139  {
140 -       mutex_lock(&sm_state->map_lock);
141 -       mutex_lock(&buffer->lock);
142 +       if (!buffer->imported)
143 +               return;
144  
145 -       pr_debug("[%s]: buffer %p (name %s, size %zu)\n",
146 -                __func__, buffer, buffer->name, buffer->size);
147 +       /* Handle cleaning up imported dmabufs */
148 +       mutex_lock(&buffer->lock);
149 +       if (buffer->import.sgt) {
150 +               dma_buf_unmap_attachment(buffer->import.attach,
151 +                                        buffer->import.sgt,
152 +                                        DMA_BIDIRECTIONAL);
153 +               buffer->import.sgt = NULL;
154 +       }
155 +       if (buffer->import.attach) {
156 +               dma_buf_detach(buffer->dma_buf, buffer->import.attach);
157 +               buffer->import.attach = NULL;
158 +       }
159 +       mutex_unlock(&buffer->lock);
160 +}
161  
162 +/*
163 + * Instructs VPU to decrement the refcount on a buffer.
164 + */
165 +static void vc_sm_vpu_free(struct vc_sm_buffer *buffer)
166 +{
167         if (buffer->vc_handle && buffer->vpu_state == VPU_MAPPED) {
168                 struct vc_sm_free_t free = { buffer->vc_handle, 0 };
169                 int status = vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
170 @@ -230,17 +268,32 @@ static void vc_sm_release_resource(struc
171                 }
172  
173                 if (sm_state->require_released_callback) {
174 -                       /* Need to wait for the VPU to confirm the free */
175 +                       /* Need to wait for the VPU to confirm the free. */
176  
177                         /* Retain a reference on this until the VPU has
178                          * released it
179                          */
180                         buffer->vpu_state = VPU_UNMAPPING;
181 -                       goto defer;
182 +               } else {
183 +                       buffer->vpu_state = VPU_NOT_MAPPED;
184 +                       buffer->vc_handle = 0;
185                 }
186 -               buffer->vpu_state = VPU_NOT_MAPPED;
187 -               buffer->vc_handle = 0;
188         }
189 +}
190 +
191 +/*
192 + * Release an allocation.
193 + * All refcounting is done via the dma buf object.
194 + *
195 + * Must be called with the mutex held. The function will either release the
196 + * mutex (if deferring the release) or destroy it. The caller must therefore not
197 + * reuse the buffer on return.
198 + */
199 +static void vc_sm_release_resource(struct vc_sm_buffer *buffer)
200 +{
201 +       pr_debug("[%s]: buffer %p (name %s, size %zu)\n",
202 +                __func__, buffer, buffer->name, buffer->size);
203 +
204         if (buffer->vc_handle) {
205                 /* We've sent the unmap request but not had the response. */
206                 pr_err("[%s]: Waiting for VPU unmap response on %p\n",
207 @@ -248,45 +301,43 @@ static void vc_sm_release_resource(struc
208                 goto defer;
209         }
210         if (buffer->in_use) {
211 -               /* Don't release dmabuf here - we await the release */
212 +               /* dmabuf still in use - we await the release */
213                 pr_err("[%s]: buffer %p is still in use\n",
214                        __func__, buffer);
215                 goto defer;
216         }
217  
218 -       /* Handle cleaning up imported dmabufs */
219 -       if (buffer->sgt) {
220 -               dma_buf_unmap_attachment(buffer->attach, buffer->sgt,
221 -                                        DMA_BIDIRECTIONAL);
222 -               buffer->sgt = NULL;
223 -       }
224 -       if (buffer->attach) {
225 -               dma_buf_detach(buffer->dma_buf, buffer->attach);
226 -               buffer->attach = NULL;
227 -       }
228 -
229 -       /* Release the dma_buf (whether ours or imported) */
230 -       if (buffer->import_dma_buf) {
231 -               dma_buf_put(buffer->import_dma_buf);
232 -               buffer->import_dma_buf = NULL;
233 -               buffer->dma_buf = NULL;
234 -       } else if (buffer->dma_buf) {
235 -               dma_buf_put(buffer->dma_buf);
236 -               buffer->dma_buf = NULL;
237 +       /* Release the allocation (whether imported dmabuf or CMA allocation) */
238 +       if (buffer->imported) {
239 +               pr_debug("%s: Release imported dmabuf %p\n", __func__,
240 +                        buffer->import.dma_buf);
241 +               if (buffer->import.dma_buf)
242 +                       dma_buf_put(buffer->import.dma_buf);
243 +               else
244 +                       pr_err("%s: Imported dmabuf has already been put for buf %p\n",
245 +                              __func__, buffer);
246 +               buffer->import.dma_buf = NULL;
247 +       } else {
248 +               if (buffer->sg_table) {
249 +                       /* Our own allocation that we need to dma_unmap_sg */
250 +                       dma_unmap_sg(&sm_state->pdev->dev,
251 +                                    buffer->sg_table->sgl,
252 +                                    buffer->sg_table->nents,
253 +                                    DMA_BIDIRECTIONAL);
254 +               }
255 +               pr_debug("%s: Release our allocation\n", __func__);
256 +               vc_sm_cma_buffer_free(&buffer->alloc);
257 +               pr_debug("%s: Release our allocation - done\n", __func__);
258         }
259  
260 -       if (buffer->sg_table && !buffer->import_dma_buf) {
261 -               /* Our own allocation that we need to dma_unmap_sg */
262 -               dma_unmap_sg(&sm_state->pdev->dev, buffer->sg_table->sgl,
263 -                            buffer->sg_table->nents, DMA_BIDIRECTIONAL);
264 -       }
265  
266 -       /* Free the local resource. Start by removing it from the list */
267 -       buffer->private = NULL;
268 +       /* Free our buffer. Start by removing it from the list */
269 +       mutex_lock(&sm_state->map_lock);
270         list_del(&buffer->global_buffer_list);
271 +       mutex_unlock(&sm_state->map_lock);
272  
273 +       pr_debug("%s: buffer %p removed from list\n", __func__, buffer);
274         mutex_unlock(&buffer->lock);
275 -       mutex_unlock(&sm_state->map_lock);
276  
277         mutex_destroy(&buffer->lock);
278  
279 @@ -295,7 +346,7 @@ static void vc_sm_release_resource(struc
280  
281  defer:
282         mutex_unlock(&buffer->lock);
283 -       mutex_unlock(&sm_state->map_lock);
284 +       return;
285  }
286  
287  /* Create support for private data tracking. */
288 @@ -317,16 +368,267 @@ static struct vc_sm_privdata_t *vc_sm_cm
289         return file_data;
290  }
291  
292 +static struct sg_table *dup_sg_table(struct sg_table *table)
293 +{
294 +       struct sg_table *new_table;
295 +       int ret, i;
296 +       struct scatterlist *sg, *new_sg;
297 +
298 +       new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
299 +       if (!new_table)
300 +               return ERR_PTR(-ENOMEM);
301 +
302 +       ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
303 +       if (ret) {
304 +               kfree(new_table);
305 +               return ERR_PTR(-ENOMEM);
306 +       }
307 +
308 +       new_sg = new_table->sgl;
309 +       for_each_sg(table->sgl, sg, table->nents, i) {
310 +               memcpy(new_sg, sg, sizeof(*sg));
311 +               new_sg->dma_address = 0;
312 +               new_sg = sg_next(new_sg);
313 +       }
314 +
315 +       return new_table;
316 +}
317 +
318 +static void free_duped_table(struct sg_table *table)
319 +{
320 +       sg_free_table(table);
321 +       kfree(table);
322 +}
323 +
324 +/* Dma buf operations for use with our own allocations */
325 +
326 +static int vc_sm_dma_buf_attach(struct dma_buf *dmabuf,
327 +                               struct dma_buf_attachment *attachment)
328 +
329 +{
330 +       struct vc_sm_dma_buf_attachment *a;
331 +       struct sg_table *table;
332 +       struct vc_sm_buffer *buf = dmabuf->priv;
333 +
334 +       a = kzalloc(sizeof(*a), GFP_KERNEL);
335 +       if (!a)
336 +               return -ENOMEM;
337 +
338 +       table = dup_sg_table(buf->sg_table);
339 +       if (IS_ERR(table)) {
340 +               kfree(a);
341 +               return -ENOMEM;
342 +       }
343 +
344 +       a->table = table;
345 +       a->dev = attachment->dev;
346 +       INIT_LIST_HEAD(&a->list);
347 +       attachment->priv = a;
348 +
349 +       mutex_lock(&buf->lock);
350 +       list_add(&a->list, &buf->attachments);
351 +       mutex_unlock(&buf->lock);
352 +       pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
353 +
354 +       return 0;
355 +}
356 +
357 +static void vc_sm_dma_buf_detatch(struct dma_buf *dmabuf,
358 +                                 struct dma_buf_attachment *attachment)
359 +{
360 +       struct vc_sm_dma_buf_attachment *a = attachment->priv;
361 +       struct vc_sm_buffer *buf = dmabuf->priv;
362 +
363 +       pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
364 +       free_duped_table(a->table);
365 +       mutex_lock(&buf->lock);
366 +       list_del(&a->list);
367 +       mutex_unlock(&buf->lock);
368 +
369 +       kfree(a);
370 +}
371 +
372 +static struct sg_table *vc_sm_map_dma_buf(struct dma_buf_attachment *attachment,
373 +                                         enum dma_data_direction direction)
374 +{
375 +       struct vc_sm_dma_buf_attachment *a = attachment->priv;
376 +       struct sg_table *table;
377 +
378 +       table = a->table;
379 +
380 +       if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
381 +                       direction))
382 +               return ERR_PTR(-ENOMEM);
383 +
384 +       pr_debug("%s attachment %p\n", __func__, attachment);
385 +       return table;
386 +}
387 +
388 +static void vc_sm_unmap_dma_buf(struct dma_buf_attachment *attachment,
389 +                               struct sg_table *table,
390 +                               enum dma_data_direction direction)
391 +{
392 +       pr_debug("%s attachment %p\n", __func__, attachment);
393 +       dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
394 +}
395 +
396 +static int vc_sm_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
397 +{
398 +       struct vc_sm_buffer *buf = dmabuf->priv;
399 +       struct sg_table *table = buf->sg_table;
400 +       unsigned long addr = vma->vm_start;
401 +       unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
402 +       struct scatterlist *sg;
403 +       int i;
404 +       int ret = 0;
405 +
406 +       pr_debug("%s dmabuf %p, buf %p, vm_start %08lX\n", __func__, dmabuf,
407 +                buf, addr);
408 +
409 +       mutex_lock(&buf->lock);
410 +
411 +       /* now map it to userspace */
412 +       for_each_sg(table->sgl, sg, table->nents, i) {
413 +               struct page *page = sg_page(sg);
414 +               unsigned long remainder = vma->vm_end - addr;
415 +               unsigned long len = sg->length;
416 +
417 +               if (offset >= sg->length) {
418 +                       offset -= sg->length;
419 +                       continue;
420 +               } else if (offset) {
421 +                       page += offset / PAGE_SIZE;
422 +                       len = sg->length - offset;
423 +                       offset = 0;
424 +               }
425 +               len = min(len, remainder);
426 +               ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
427 +                                     vma->vm_page_prot);
428 +               if (ret)
429 +                       break;
430 +               addr += len;
431 +               if (addr >= vma->vm_end)
432 +                       break;
433 +       }
434 +       mutex_unlock(&buf->lock);
435 +
436 +       if (ret)
437 +               pr_err("%s: failure mapping buffer to userspace\n",
438 +                      __func__);
439 +
440 +       return ret;
441 +}
442 +
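Illustration, not part of the patch: the handler above is what a plain userspace mmap() on a dmabuf fd exported by this driver lands in. How the fd reaches userspace (for instance via another driver's ioctl) is outside this patch and assumed here.

#include <stddef.h>
#include <sys/mman.h>

static void *map_vcsm_buffer(int dmabuf_fd, size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, dmabuf_fd, 0);

	return p == MAP_FAILED ? NULL : p;
}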
443 +static void vc_sm_dma_buf_release(struct dma_buf *dmabuf)
444 +{
445 +       struct vc_sm_buffer *buffer;
446 +
447 +       if (!dmabuf)
448 +               return;
449 +
450 +       buffer = (struct vc_sm_buffer *)dmabuf->priv;
451 +
452 +       mutex_lock(&buffer->lock);
453 +
454 +       pr_debug("%s dmabuf %p, buffer %p\n", __func__, dmabuf, buffer);
455 +
456 +       buffer->in_use = 0;
457 +
458 +       /* Unmap on the VPU */
459 +       vc_sm_vpu_free(buffer);
460 +       pr_debug("%s vpu_free done\n", __func__);
461 +
462 +       /* Unmap our dma_buf object (the vc_sm_buffer remains until released
463 +        * on the VPU).
464 +        */
465 +       vc_sm_clean_up_dmabuf(buffer);
466 +       pr_debug("%s clean_up dmabuf done\n", __func__);
467 +
468 +       vc_sm_release_resource(buffer);
469 +       pr_debug("%s done\n", __func__);
470 +}
471 +
472 +static int vc_sm_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
473 +                                         enum dma_data_direction direction)
474 +{
475 +       struct vc_sm_buffer *buf;
476 +       struct vc_sm_dma_buf_attachment *a;
477 +
478 +       if (!dmabuf)
479 +               return -EFAULT;
480 +
481 +       buf = dmabuf->priv;
482 +       if (!buf)
483 +               return -EFAULT;
484 +
485 +       mutex_lock(&buf->lock);
486 +
487 +       list_for_each_entry(a, &buf->attachments, list) {
488 +               dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
489 +                                   direction);
490 +       }
491 +       mutex_unlock(&buf->lock);
492 +
493 +       return 0;
494 +}
495 +
496 +static int vc_sm_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
497 +                                       enum dma_data_direction direction)
498 +{
499 +       struct vc_sm_buffer *buf;
500 +       struct vc_sm_dma_buf_attachment *a;
501 +
502 +       if (!dmabuf)
503 +               return -EFAULT;
504 +       buf = dmabuf->priv;
505 +       if (!buf)
506 +               return -EFAULT;
507 +
508 +       mutex_lock(&buf->lock);
509 +
510 +       list_for_each_entry(a, &buf->attachments, list) {
511 +               dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
512 +                                      direction);
513 +       }
514 +       mutex_unlock(&buf->lock);
515 +
516 +       return 0;
517 +}
518 +
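Illustration, not part of the patch: the two handlers above back the standard dma-buf CPU-access bracket, so a kernel consumer attached to one of these buffers would wrap its CPU accesses in the usual way:

#include <linux/dma-buf.h>

static void read_buffer_contents(struct dma_buf *dmabuf)
{
	if (dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE))
		return;

	/* ... CPU reads the buffer through its kernel mapping ... */

	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}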
519 +static void *vc_sm_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
520 +{
521 +       /* FIXME */
522 +       return NULL;
523 +}
524 +
525 +static void vc_sm_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
526 +                                void *ptr)
527 +{
528 +       /* FIXME */
529 +}
530 +
531 +static const struct dma_buf_ops dma_buf_ops = {
532 +       .map_dma_buf = vc_sm_map_dma_buf,
533 +       .unmap_dma_buf = vc_sm_unmap_dma_buf,
534 +       .mmap = vc_sm_dmabuf_mmap,
535 +       .release = vc_sm_dma_buf_release,
536 +       .attach = vc_sm_dma_buf_attach,
537 +       .detach = vc_sm_dma_buf_detatch,
538 +       .begin_cpu_access = vc_sm_dma_buf_begin_cpu_access,
539 +       .end_cpu_access = vc_sm_dma_buf_end_cpu_access,
540 +       .map = vc_sm_dma_buf_kmap,
541 +       .unmap = vc_sm_dma_buf_kunmap,
542 +};
543  /* Dma_buf operations for chaining through to an imported dma_buf */
544  static
545  int vc_sm_import_dma_buf_attach(struct dma_buf *dmabuf,
546                                 struct dma_buf_attachment *attachment)
547  {
548 -       struct vc_sm_buffer *res = dmabuf->priv;
549 +       struct vc_sm_buffer *buf = dmabuf->priv;
550  
551 -       if (!res->import_dma_buf)
552 +       if (!buf->imported)
553                 return -EINVAL;
554 -       return res->import_dma_buf->ops->attach(res->import_dma_buf,
555 +       return buf->import.dma_buf->ops->attach(buf->import.dma_buf,
556                                                 attachment);
557  }
558  
559 @@ -334,22 +636,23 @@ static
560  void vc_sm_import_dma_buf_detatch(struct dma_buf *dmabuf,
561                                   struct dma_buf_attachment *attachment)
562  {
563 -       struct vc_sm_buffer *res = dmabuf->priv;
564 +       struct vc_sm_buffer *buf = dmabuf->priv;
565  
566 -       if (!res->import_dma_buf)
567 +       if (!buf->imported)
568                 return;
569 -       res->import_dma_buf->ops->detach(res->import_dma_buf, attachment);
570 +       buf->import.dma_buf->ops->detach(buf->import.dma_buf, attachment);
571  }
572  
573  static
574  struct sg_table *vc_sm_import_map_dma_buf(struct dma_buf_attachment *attachment,
575                                           enum dma_data_direction direction)
576  {
577 -       struct vc_sm_buffer *res = attachment->dmabuf->priv;
578 +       struct vc_sm_buffer *buf = attachment->dmabuf->priv;
579  
580 -       if (!res->import_dma_buf)
581 +       if (!buf->imported)
582                 return NULL;
583 -       return res->import_dma_buf->ops->map_dma_buf(attachment, direction);
584 +       return buf->import.dma_buf->ops->map_dma_buf(attachment,
585 +                                                    direction);
586  }
587  
588  static
589 @@ -357,87 +660,88 @@ void vc_sm_import_unmap_dma_buf(struct d
590                                 struct sg_table *table,
591                                 enum dma_data_direction direction)
592  {
593 -       struct vc_sm_buffer *res = attachment->dmabuf->priv;
594 +       struct vc_sm_buffer *buf = attachment->dmabuf->priv;
595  
596 -       if (!res->import_dma_buf)
597 +       if (!buf->imported)
598                 return;
599 -       res->import_dma_buf->ops->unmap_dma_buf(attachment, table, direction);
600 +       buf->import.dma_buf->ops->unmap_dma_buf(attachment, table, direction);
601  }
602  
603  static
604  int vc_sm_import_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
605  {
606 -       struct vc_sm_buffer *res = dmabuf->priv;
607 +       struct vc_sm_buffer *buf = dmabuf->priv;
608  
609 -       pr_debug("%s: mmap dma_buf %p, res %p, imported db %p\n", __func__,
610 -                dmabuf, res, res->import_dma_buf);
611 -       if (!res->import_dma_buf) {
612 +       pr_debug("%s: mmap dma_buf %p, buf %p, imported db %p\n", __func__,
613 +                dmabuf, buf, buf->import.dma_buf);
614 +       if (!buf->imported) {
615                 pr_err("%s: mmap dma_buf %p- not an imported buffer\n",
616                        __func__, dmabuf);
617                 return -EINVAL;
618         }
619 -       return res->import_dma_buf->ops->mmap(res->import_dma_buf, vma);
620 +       return buf->import.dma_buf->ops->mmap(buf->import.dma_buf, vma);
621  }
622  
623  static
624  void vc_sm_import_dma_buf_release(struct dma_buf *dmabuf)
625  {
626 -       struct vc_sm_buffer *res = dmabuf->priv;
627 +       struct vc_sm_buffer *buf = dmabuf->priv;
628  
629         pr_debug("%s: Relasing dma_buf %p\n", __func__, dmabuf);
630 -       if (!res->import_dma_buf)
631 +       if (!buf->imported)
632                 return;
633  
634 -       res->in_use = 0;
635 +       mutex_lock(&buf->lock);
636 +       buf->in_use = 0;
637  
638 -       vc_sm_release_resource(res, 0);
639 +       vc_sm_vpu_free(buf);
640 +
641 +       vc_sm_release_resource(buf);
642  }
643  
644  static
645  void *vc_sm_import_dma_buf_kmap(struct dma_buf *dmabuf,
646                                 unsigned long offset)
647  {
648 -       struct vc_sm_buffer *res = dmabuf->priv;
649 +       struct vc_sm_buffer *buf = dmabuf->priv;
650  
651 -       if (!res->import_dma_buf)
652 +       if (!buf->imported)
653                 return NULL;
654 -       return res->import_dma_buf->ops->map(res->import_dma_buf,
655 -                                                     offset);
656 +       return buf->import.dma_buf->ops->map(buf->import.dma_buf, offset);
657  }
658  
659  static
660  void vc_sm_import_dma_buf_kunmap(struct dma_buf *dmabuf,
661                                  unsigned long offset, void *ptr)
662  {
663 -       struct vc_sm_buffer *res = dmabuf->priv;
664 +       struct vc_sm_buffer *buf = dmabuf->priv;
665  
666 -       if (!res->import_dma_buf)
667 +       if (!buf->imported)
668                 return;
669 -       res->import_dma_buf->ops->unmap(res->import_dma_buf,
670 -                                              offset, ptr);
671 +       buf->import.dma_buf->ops->unmap(buf->import.dma_buf, offset, ptr);
672  }
673  
674  static
675  int vc_sm_import_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
676                                           enum dma_data_direction direction)
677  {
678 -       struct vc_sm_buffer *res = dmabuf->priv;
679 +       struct vc_sm_buffer *buf = dmabuf->priv;
680  
681 -       if (!res->import_dma_buf)
682 +       if (!buf->imported)
683                 return -EINVAL;
684 -       return res->import_dma_buf->ops->begin_cpu_access(res->import_dma_buf,
685 -                                                           direction);
686 +       return buf->import.dma_buf->ops->begin_cpu_access(buf->import.dma_buf,
687 +                                                         direction);
688  }
689  
690  static
691  int vc_sm_import_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
692                                         enum dma_data_direction direction)
693  {
694 -       struct vc_sm_buffer *res = dmabuf->priv;
695 +       struct vc_sm_buffer *buf = dmabuf->priv;
696  
697 -       if (!res->import_dma_buf)
698 +       if (!buf->imported)
699                 return -EINVAL;
700 -       return res->import_dma_buf->ops->end_cpu_access(res->import_dma_buf,
701 +       return buf->import.dma_buf->ops->end_cpu_access(buf->import.dma_buf,
702                                                           direction);
703  }
704  
705 @@ -516,9 +820,8 @@ vc_sm_cma_import_dmabuf_internal(struct
706         memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
707                sizeof(VC_SM_RESOURCE_NAME_DEFAULT));
708  
709 -       pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %pad, size %u\n",
710 -                __func__, import.name, import.type, &dma_addr,
711 -                import.size);
712 +       pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %pad, size %u.\n",
713 +                __func__, import.name, import.type, &dma_addr, import.size);
714  
715         /* Allocate the videocore buffer. */
716         status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
717 @@ -548,12 +851,14 @@ vc_sm_cma_import_dmabuf_internal(struct
718         buffer->size = import.size;
719         buffer->vpu_state = VPU_MAPPED;
720  
721 -       buffer->import_dma_buf = dma_buf;
722 +       buffer->imported = 1;
723 +       buffer->import.dma_buf = dma_buf;
724  
725 -       buffer->attach = attach;
726 -       buffer->sgt = sgt;
727 +       buffer->import.attach = attach;
728 +       buffer->import.sgt = sgt;
729         buffer->dma_addr = dma_addr;
730         buffer->in_use = 1;
731 +       buffer->kernel_id = import.kernel_id;
732  
733         /*
734          * We're done - we need to export a new dmabuf chaining through most
735 @@ -594,6 +899,91 @@ error:
736         return ret;
737  }
738  
739 +static int vc_sm_cma_vpu_alloc(u32 size, uint32_t align, const char *name,
740 +                              u32 mem_handle, struct vc_sm_buffer **ret_buffer)
741 +{
742 +       DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
743 +       struct vc_sm_buffer *buffer = NULL;
744 +       int aligned_size;
745 +       int ret = 0;
746 +
747 +       /* Align to the user requested align */
748 +       aligned_size = ALIGN(size, align);
749 +       /* and then to a page boundary */
750 +       aligned_size = PAGE_ALIGN(aligned_size);
751 +
752 +       if (!aligned_size)
753 +               return -EINVAL;
754 +
755 +       /* Allocate local buffer to track this allocation. */
756 +       buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
757 +       if (!buffer)
758 +               return -ENOMEM;
759 +
760 +       mutex_init(&buffer->lock);
761 +
762 +       if (vc_sm_cma_buffer_allocate(sm_state->cma_heap, &buffer->alloc,
763 +                                     aligned_size)) {
764 +               pr_err("[%s]: cma alloc of %d bytes failed\n",
765 +                      __func__, aligned_size);
766 +               ret = -ENOMEM;
767 +               goto error;
768 +       }
769 +       buffer->sg_table = buffer->alloc.sg_table;
770 +
771 +       pr_debug("[%s]: cma alloc of %d bytes success\n",
772 +                __func__, aligned_size);
773 +
774 +       if (dma_map_sg(&sm_state->pdev->dev, buffer->sg_table->sgl,
775 +                      buffer->sg_table->nents, DMA_BIDIRECTIONAL) <= 0) {
776 +               pr_err("[%s]: dma_map_sg failed\n", __func__);
777 +               ret = -ENOMEM;
778 +               goto error;
779 +       }
780 +       INIT_LIST_HEAD(&buffer->attachments);
781 +
782 +       memcpy(buffer->name, name,
783 +              min(sizeof(buffer->name) - 1, strlen(name)));
784 +
785 +       exp_info.ops = &dma_buf_ops;
786 +       exp_info.size = aligned_size;
787 +       exp_info.flags = O_RDWR;
788 +       exp_info.priv = buffer;
789 +
790 +       buffer->dma_buf = dma_buf_export(&exp_info);
791 +       if (IS_ERR(buffer->dma_buf)) {
792 +               ret = PTR_ERR(buffer->dma_buf);
793 +               goto error;
794 +       }
795 +       buffer->dma_addr = (uint32_t)sg_dma_address(buffer->sg_table->sgl);
796 +       if ((buffer->dma_addr & 0xC0000000) != 0xC0000000) {
797 +               pr_err("%s: Expecting an uncached alias for dma_addr %pad\n",
798 +                      __func__, &buffer->dma_addr);
799 +               buffer->dma_addr |= 0xC0000000;
800 +       }
801 +       buffer->private = sm_state->vpu_allocs;
802 +
803 +       buffer->vc_handle = mem_handle;
804 +       buffer->vpu_state = VPU_MAPPED;
805 +       buffer->vpu_allocated = 1;
806 +       buffer->size = size;
807 +       /*
808 +        * Create an ID that will be passed along with our message so
809 +        * that when we service the release reply, we can look up which
810 +        * resource is being released.
811 +        */
812 +       buffer->kernel_id = get_kernel_id(buffer);
813 +
814 +       vc_sm_add_resource(sm_state->vpu_allocs, buffer);
815 +
816 +       *ret_buffer = buffer;
817 +       return 0;
818 +error:
819 +       if (buffer)
820 +               vc_sm_release_resource(buffer);
821 +       return ret;
822 +}
823 +
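Note, not part of the patch: the 0xC0000000 check in vc_sm_cma_vpu_alloc() reflects the BCM283x convention that the VPU addresses SDRAM through bus aliases, with 0xC0000000 selecting the uncached alias (an assumption spelled out from the code above). The conversion amounts to:

#include <linux/types.h>

/* Hypothetical helper equivalent to the fix-up in vc_sm_cma_vpu_alloc(). */
static inline u32 dma_addr_to_vpu_uncached(dma_addr_t dma_addr)
{
	return (u32)dma_addr | 0xC0000000;
}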
824  static void
825  vc_sm_vpu_event(struct sm_instance *instance, struct vc_sm_result_t *reply,
826                 int reply_len)
827 @@ -612,21 +1002,61 @@ vc_sm_vpu_event(struct sm_instance *inst
828                 struct vc_sm_released *release = (struct vc_sm_released *)reply;
829                 struct vc_sm_buffer *buffer =
830                                         lookup_kernel_id(release->kernel_id);
831 +               if (!buffer) {
832 +                       pr_err("%s: VC released a buffer that is already released, kernel_id %d\n",
833 +                              __func__, release->kernel_id);
834 +                       break;
835 +               }
836 +               mutex_lock(&buffer->lock);
837  
838 -               /*
839 -                * FIXME: Need to check buffer is still valid and allocated
840 -                * before continuing
841 -                */
842                 pr_debug("%s: Released addr %08x, size %u, id %08x, mem_handle %08x\n",
843                          __func__, release->addr, release->size,
844                          release->kernel_id, release->vc_handle);
845 -               mutex_lock(&buffer->lock);
846 +
847                 buffer->vc_handle = 0;
848                 buffer->vpu_state = VPU_NOT_MAPPED;
849 -               mutex_unlock(&buffer->lock);
850                 free_kernel_id(release->kernel_id);
851  
852 -               vc_sm_release_resource(buffer, 0);
853 +               if (buffer->vpu_allocated) {
854 +                       /* VPU allocation, so release the dmabuf which will
855 +                        * trigger the clean up.
856 +                        */
857 +                       mutex_unlock(&buffer->lock);
858 +                       dma_buf_put(buffer->dma_buf);
859 +               } else {
860 +                       vc_sm_release_resource(buffer);
861 +               }
862 +       }
863 +       break;
864 +       case VC_SM_MSG_TYPE_VC_MEM_REQUEST:
865 +       {
866 +               struct vc_sm_buffer *buffer = NULL;
867 +               struct vc_sm_vc_mem_request *req =
868 +                                       (struct vc_sm_vc_mem_request *)reply;
869 +               struct vc_sm_vc_mem_request_result result;
870 +               int ret;
871 +
872 +               pr_debug("%s: Request %u bytes of memory, align %d name %s, trans_id %08x\n",
873 +                        __func__, req->size, req->align, req->name,
874 +                        req->trans_id);
875 +               ret = vc_sm_cma_vpu_alloc(req->size, req->align, req->name,
876 +                                         req->vc_handle, &buffer);
877 +
878 +               result.trans_id = req->trans_id;
879 +               if (!ret) {
880 +                       result.addr = buffer->dma_addr;
881 +                       result.kernel_id = buffer->kernel_id;
882 +                       pr_debug("%s: Allocated resource buffer %p, addr %pad\n",
883 +                                __func__, buffer, &buffer->dma_addr);
884 +               } else {
885 +                       pr_err("%s: Allocation failed size %u, name %s, vc_handle %u\n",
886 +                              __func__, req->size, req->name, req->vc_handle);
887 +                       result.addr = 0;
888 +                       result.kernel_id = 0;
889 +               }
890 +               vc_sm_vchi_client_vc_mem_req_reply(sm_state->sm_handle, &result,
891 +                                                  &sm_state->int_trans_id);
892 +               break;
893         }
894         break;
895         default:
896 @@ -645,6 +1075,14 @@ static void vc_sm_connected_init(void)
897  
898         pr_info("[%s]: start\n", __func__);
899  
900 +       if (vc_sm_cma_add_heaps(&sm_state->cma_heap) ||
901 +           !sm_state->cma_heap) {
902 +               pr_err("[%s]: failed to initialise CMA heaps\n",
903 +                      __func__);
904 +               ret = -EIO;
905 +               goto err_free_mem;
906 +       }
907 +
908         /*
909          * Initialize and create a VCHI connection for the shared memory service
910          * running on videocore.
911 @@ -696,7 +1134,7 @@ static void vc_sm_connected_init(void)
912                 goto err_remove_shared_memory;
913         }
914  
915 -       version.version = 1;
916 +       version.version = 2;
917         ret = vc_sm_cma_vchi_client_version(sm_state->sm_handle, &version,
918                                             &version_result,
919                                             &sm_state->int_trans_id);
920 @@ -768,7 +1206,7 @@ static int bcm2835_vc_sm_cma_remove(stru
921  int vc_sm_cma_int_handle(void *handle)
922  {
923         struct dma_buf *dma_buf = (struct dma_buf *)handle;
924 -       struct vc_sm_buffer *res;
925 +       struct vc_sm_buffer *buf;
926  
927         /* Validate we can work with this device. */
928         if (!sm_state || !handle) {
929 @@ -776,8 +1214,8 @@ int vc_sm_cma_int_handle(void *handle)
930                 return 0;
931         }
932  
933 -       res = (struct vc_sm_buffer *)dma_buf->priv;
934 -       return res->vc_handle;
935 +       buf = (struct vc_sm_buffer *)dma_buf->priv;
936 +       return buf->vc_handle;
937  }
938  EXPORT_SYMBOL_GPL(vc_sm_cma_int_handle);
939  
940 @@ -804,7 +1242,7 @@ EXPORT_SYMBOL_GPL(vc_sm_cma_free);
941  int vc_sm_cma_import_dmabuf(struct dma_buf *src_dmabuf, void **handle)
942  {
943         struct dma_buf *new_dma_buf;
944 -       struct vc_sm_buffer *res;
945 +       struct vc_sm_buffer *buf;
946         int ret;
947  
948         /* Validate we can work with this device. */
949 @@ -818,7 +1256,7 @@ int vc_sm_cma_import_dmabuf(struct dma_b
950  
951         if (!ret) {
952                 pr_debug("%s: imported to ptr %p\n", __func__, new_dma_buf);
953 -               res = (struct vc_sm_buffer *)new_dma_buf->priv;
954 +               buf = (struct vc_sm_buffer *)new_dma_buf->priv;
955  
956                 /* Assign valid handle at this time.*/
957                 *handle = new_dma_buf;
958 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
959 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
960 @@ -21,6 +21,8 @@
961  #include <linux/types.h>
962  #include <linux/miscdevice.h>
963  
964 +#include "vc_sm_cma.h"
965 +
966  #define VC_SM_MAX_NAME_LEN 32
967  
968  enum vc_sm_vpu_mapping_state {
969 @@ -29,31 +31,51 @@ enum vc_sm_vpu_mapping_state {
970         VPU_UNMAPPING
971  };
972  
973 +struct vc_sm_imported {
974 +       struct dma_buf *dma_buf;
975 +       struct dma_buf_attachment *attach;
976 +       struct sg_table *sgt;
977 +};
978 +
979  struct vc_sm_buffer {
980         struct list_head global_buffer_list;    /* Global list of buffers. */
981  
982 +       /* Index in the kernel_id idr so that we can find the
983 +        * mmal_msg_context again when servicing the VCHI reply.
984 +        */
985 +       int kernel_id;
986 +
987         size_t size;
988  
989         /* Lock over all the following state for this buffer */
990         struct mutex lock;
991 -       struct sg_table *sg_table;
992         struct list_head attachments;
993  
994         char name[VC_SM_MAX_NAME_LEN];
995  
996         int in_use:1;   /* Kernel is still using this resource */
997 +       int imported:1; /* Imported dmabuf */
998 +
999 +       struct sg_table *sg_table;
1000  
1001         enum vc_sm_vpu_mapping_state vpu_state;
1002         u32 vc_handle;  /* VideoCore handle for this buffer */
1003 +       int vpu_allocated;      /*
1004 +                                * The VPU made this allocation. Release the
1005 +                                * local dma_buf when the VPU releases the
1006 +                                * resource.
1007 +                                */
1008  
1009         /* DMABUF related fields */
1010 -       struct dma_buf *import_dma_buf;
1011         struct dma_buf *dma_buf;
1012 -       struct dma_buf_attachment *attach;
1013 -       struct sg_table *sgt;
1014         dma_addr_t dma_addr;
1015  
1016         struct vc_sm_privdata_t *private;
1017 +
1018 +       union {
1019 +               struct vc_sm_cma_alloc_data alloc;
1020 +               struct vc_sm_imported import;
1021 +       };
1022  };
1023  
1024  #endif
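Illustration, not part of the patch: the anonymous union at the end of struct vc_sm_buffer is discriminated by the imported flag, and the release paths in vc_sm.c branch on it before touching either member. Condensed into a hypothetical helper:

static void release_backing(struct vc_sm_buffer *buf)
{
	if (buf->imported)
		dma_buf_put(buf->import.dma_buf);	/* imported dmabuf */
	else
		vc_sm_cma_buffer_free(&buf->alloc);	/* our own CMA pages */
}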
1025 --- /dev/null
1026 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.c
1027 @@ -0,0 +1,99 @@
1028 +// SPDX-License-Identifier: GPL-2.0
1029 +/*
1030 + * VideoCore Shared Memory CMA allocator
1031 + *
1032 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
1033 + *
1034 + * Based on the Android ION allocator
1035 + * Copyright (C) Linaro 2012
1036 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
1037 + *
1038 + */
1039 +
1040 +#include <linux/slab.h>
1041 +#include <linux/errno.h>
1042 +#include <linux/err.h>
1043 +#include <linux/cma.h>
1044 +#include <linux/scatterlist.h>
1045 +
1046 +#include "vc_sm_cma.h"
1047 +
1048 +/* CMA heap operations functions */
1049 +int vc_sm_cma_buffer_allocate(struct cma *cma_heap,
1050 +                             struct vc_sm_cma_alloc_data *buffer,
1051 +                             unsigned long len)
1052 +{
1053 +       /* len should already be page aligned */
1054 +       unsigned long num_pages = len / PAGE_SIZE;
1055 +       struct sg_table *table;
1056 +       struct page *pages;
1057 +       int ret;
1058 +
1059 +       pages = cma_alloc(cma_heap, num_pages, 0, GFP_KERNEL);
1060 +       if (!pages)
1061 +               return -ENOMEM;
1062 +
1063 +       table = kmalloc(sizeof(*table), GFP_KERNEL);
1064 +       if (!table)
1065 +               goto err;
1066 +
1067 +       ret = sg_alloc_table(table, 1, GFP_KERNEL);
1068 +       if (ret)
1069 +               goto free_mem;
1070 +
1071 +       sg_set_page(table->sgl, pages, len, 0);
1072 +
1073 +       buffer->priv_virt = pages;
1074 +       buffer->sg_table = table;
1075 +       buffer->cma_heap = cma_heap;
1076 +       buffer->num_pages = num_pages;
1077 +       return 0;
1078 +
1079 +free_mem:
1080 +       kfree(table);
1081 +err:
1082 +       cma_release(cma_heap, pages, num_pages);
1083 +       return -ENOMEM;
1084 +}
1085 +
1086 +void vc_sm_cma_buffer_free(struct vc_sm_cma_alloc_data *buffer)
1087 +{
1088 +       struct cma *cma_heap = buffer->cma_heap;
1089 +       struct page *pages = buffer->priv_virt;
1090 +
1091 +       /* release memory */
1092 +       if (cma_heap)
1093 +               cma_release(cma_heap, pages, buffer->num_pages);
1094 +
1095 +       /* release sg table */
1096 +       if (buffer->sg_table) {
1097 +               sg_free_table(buffer->sg_table);
1098 +               kfree(buffer->sg_table);
1099 +               buffer->sg_table = NULL;
1100 +       }
1101 +}
1102 +
1103 +static int __vc_sm_cma_add_heaps(struct cma *cma, void *priv)
1104 +{
1105 +       struct cma **heap = (struct cma **)priv;
1106 +       const char *name = cma_get_name(cma);
1107 +
1108 +       if (!(*heap)) {
1109 +               phys_addr_t phys_addr = cma_get_base(cma);
1110 +
1111 +               pr_debug("%s: Adding cma heap %s (start %pap, size %lu) for use by vcsm\n",
1112 +                        __func__, name, &phys_addr, cma_get_size(cma));
1113 +               *heap = cma;
1114 +       } else {
1115 +               pr_err("%s: Ignoring heap %s as already set\n",
1116 +                      __func__, name);
1117 +       }
1118 +
1119 +       return 0;
1120 +}
1121 +
1122 +int vc_sm_cma_add_heaps(struct cma **cma_heap)
1123 +{
1124 +       cma_for_each_area(__vc_sm_cma_add_heaps, cma_heap);
1125 +       return 0;
1126 +}
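Illustration, not part of the patch: minimal use of the allocator above, assuming a heap was already picked up via vc_sm_cma_add_heaps(); SZ_64K comes from linux/sizes.h.

#include <linux/mm.h>
#include <linux/sizes.h>
#include "vc_sm_cma.h"

static int vcsm_cma_alloc_example(struct cma *heap)
{
	struct vc_sm_cma_alloc_data alloc = { 0 };
	int ret;

	/* Length must already be page aligned. */
	ret = vc_sm_cma_buffer_allocate(heap, &alloc, PAGE_ALIGN(SZ_64K));
	if (ret)
		return ret;

	/* alloc.sg_table holds one entry covering alloc.num_pages pages. */
	vc_sm_cma_buffer_free(&alloc);
	return 0;
}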
1127 --- /dev/null
1128 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.h
1129 @@ -0,0 +1,39 @@
1130 +/* SPDX-License-Identifier: GPL-2.0 */
1131 +
1132 +/*
1133 + * VideoCore Shared Memory CMA allocator
1134 + *
1135 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
1136 + *
1137 + * Based on the Android ION allocator
1138 + * Copyright (C) Linaro 2012
1139 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
1140 + *
1141 + * This software is licensed under the terms of the GNU General Public
1142 + * License version 2, as published by the Free Software Foundation, and
1143 + * may be copied, distributed, and modified under those terms.
1144 + *
1145 + * This program is distributed in the hope that it will be useful,
1146 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1147 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1148 + * GNU General Public License for more details.
1149 + *
1150 + */
1151 +#ifndef VC_SM_CMA_H
1152 +#define VC_SM_CMA_H
1153 +
1154 +struct vc_sm_cma_alloc_data {
1155 +       struct cma *cma_heap;
1156 +       unsigned long num_pages;
1157 +       void *priv_virt;
1158 +       struct sg_table *sg_table;
1159 +};
1160 +
1161 +int vc_sm_cma_buffer_allocate(struct cma *cma_heap,
1162 +                             struct vc_sm_cma_alloc_data *buffer,
1163 +                             unsigned long len);
1164 +void vc_sm_cma_buffer_free(struct vc_sm_cma_alloc_data *buffer);
1165 +
1166 +int vc_sm_cma_add_heaps(struct cma **cma_heap);
1167 +
1168 +#endif
1169 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
1170 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
1171 @@ -500,3 +500,13 @@ int vc_sm_cma_vchi_client_version(struct
1172                                    msg, sizeof(*msg), NULL, 0,
1173                                    cur_trans_id, 0);
1174  }
1175 +
1176 +int vc_sm_vchi_client_vc_mem_req_reply(struct sm_instance *handle,
1177 +                                      struct vc_sm_vc_mem_request_result *msg,
1178 +                                      uint32_t *cur_trans_id)
1179 +{
1180 +       return vc_sm_cma_vchi_send_msg(handle,
1181 +                                      VC_SM_MSG_TYPE_VC_MEM_REQUEST_REPLY,
1182 +                                      msg, sizeof(*msg), 0, 0, cur_trans_id,
1183 +                                      0);
1184 +}
1185 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
1186 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
1187 @@ -56,4 +56,8 @@ int vc_sm_cma_vchi_client_version(struct
1188                                   struct vc_sm_result_t *result,
1189                                   u32 *cur_trans_id);
1190  
1191 +int vc_sm_vchi_client_vc_mem_req_reply(struct sm_instance *handle,
1192 +                                      struct vc_sm_vc_mem_request_result *msg,
1193 +                                      uint32_t *cur_trans_id);
1194 +
1195  #endif /* __VC_SM_CMA_VCHI_H__INCLUDED__ */
1196 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
1197 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
1198 @@ -264,6 +264,8 @@ struct vc_sm_vc_mem_request {
1199         u32 align;
1200         /* resource name (for easier tracking) */
1201         char     name[VC_SM_RESOURCE_NAME];
1202 +       /* VPU handle for the resource */
1203 +       u32 vc_handle;
1204  };
1205  
1206  /* Response from the kernel to provide the VPU with some memory */
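Sketch, not part of the patch: the reply that pairs with struct vc_sm_vc_mem_request above, with fields inferred from how vc_sm_vpu_event() fills it in; the real struct vc_sm_vc_mem_request_result is defined elsewhere in vc_sm_defs.h and its exact layout is not shown in this diff.

struct vc_sm_vc_mem_request_result_sketch {
	/* trans_id copied from the request so the VPU can pair them up */
	u32 trans_id;
	/* VPU (uncached alias) address of the new allocation, 0 on failure */
	u32 addr;
	/* idr key so the later 'released' message can find the buffer */
	u32 kernel_id;
};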