oweals/openwrt.git: target/linux/brcm2708/patches-4.9/0037-cma-Add-vc_cma-driver-to-enable-use-of-CMA.patch
1 From 4d96064d4c95d73d22563cea403eccd72e7b0da0 Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Wed, 3 Jul 2013 00:31:47 +0100
4 Subject: [PATCH] cma: Add vc_cma driver to enable use of CMA
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 Signed-off-by: popcornmix <popcornmix@gmail.com>
10
11 vc_cma: Make the vc_cma area the default contiguous DMA area
12
13 vc_cma: Provide empty functions when module is not built
14
15 Providing empty functions saves the users from guarding the
16 function call with an #if clause.
17 Move __init markings from prototypes to functions.
18
19 Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
20 ---
21  drivers/char/Kconfig                  |    2 +
22  drivers/char/Makefile                 |    1 +
23  drivers/char/broadcom/Kconfig         |   15 +
24  drivers/char/broadcom/Makefile        |    1 +
25  drivers/char/broadcom/vc_cma/Makefile |    7 +
26  drivers/char/broadcom/vc_cma/vc_cma.c | 1193 +++++++++++++++++++++++++++++++++
27  include/linux/broadcom/vc_cma.h       |   36 +
28  7 files changed, 1255 insertions(+)
29  create mode 100644 drivers/char/broadcom/Kconfig
30  create mode 100644 drivers/char/broadcom/Makefile
31  create mode 100644 drivers/char/broadcom/vc_cma/Makefile
32  create mode 100644 drivers/char/broadcom/vc_cma/vc_cma.c
33  create mode 100644 include/linux/broadcom/vc_cma.h
34
35 --- a/drivers/char/Kconfig
36 +++ b/drivers/char/Kconfig
37 @@ -4,6 +4,8 @@
38  
39  menu "Character devices"
40  
41 +source "drivers/char/broadcom/Kconfig"
42 +
43  source "drivers/tty/Kconfig"
44  
45  config DEVMEM
46 --- a/drivers/char/Makefile
47 +++ b/drivers/char/Makefile
48 @@ -60,3 +60,4 @@ js-rtc-y = rtc.o
49  obj-$(CONFIG_TILE_SROM)                += tile-srom.o
50  obj-$(CONFIG_XILLYBUS)         += xillybus/
51  obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
52 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
53 --- /dev/null
54 +++ b/drivers/char/broadcom/Kconfig
55 @@ -0,0 +1,15 @@
56 +#
57 +# Broadcom char driver config
58 +#
59 +
60 +menuconfig BRCM_CHAR_DRIVERS
61 +       bool "Broadcom Char Drivers"
62 +       help
63 +         Broadcom's char drivers
64 +
65 +config BCM_VC_CMA
66 +       bool "Videocore CMA"
67 +       depends on CMA && BRCM_CHAR_DRIVERS && BCM2708_VCHIQ
68 +       default n
69 +        help
70 +          Helper for videocore CMA access.
71 --- /dev/null
72 +++ b/drivers/char/broadcom/Makefile
73 @@ -0,0 +1 @@
74 +obj-$(CONFIG_BCM_VC_CMA)       += vc_cma/
75 --- /dev/null
76 +++ b/drivers/char/broadcom/vc_cma/Makefile
77 @@ -0,0 +1,7 @@
78 +ccflags-$(CONFIG_BCM_VC_CMA)  += -Wall -Wstrict-prototypes -Wno-trigraphs -Werror
79 +ccflags-$(CONFIG_BCM_VC_CMA)  += -Iinclude/linux/broadcom -Idrivers/staging/vc04_services -Idrivers/staging/vc04_services/interface/vchi -Idrivers/staging/vc04_services/interface/vchiq_arm
80 +ccflags-$(CONFIG_BCM_VC_CMA)  += -D__KERNEL__ -D__linux__
81 +
82 +obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o
83 +
84 +vc-cma-objs := vc_cma.o
85 --- /dev/null
86 +++ b/drivers/char/broadcom/vc_cma/vc_cma.c
87 @@ -0,0 +1,1193 @@
88 +/**
89 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
90 + *
91 + * Redistribution and use in source and binary forms, with or without
92 + * modification, are permitted provided that the following conditions
93 + * are met:
94 + * 1. Redistributions of source code must retain the above copyright
95 + *    notice, this list of conditions, and the following disclaimer,
96 + *    without modification.
97 + * 2. Redistributions in binary form must reproduce the above copyright
98 + *    notice, this list of conditions and the following disclaimer in the
99 + *    documentation and/or other materials provided with the distribution.
100 + * 3. The names of the above-listed copyright holders may not be used
101 + *    to endorse or promote products derived from this software without
102 + *    specific prior written permission.
103 + *
104 + * ALTERNATIVELY, this software may be distributed under the terms of the
105 + * GNU General Public License ("GPL") version 2, as published by the Free
106 + * Software Foundation.
107 + *
108 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
109 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
110 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
111 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
112 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
113 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
114 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
115 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
116 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
117 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
118 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
119 + */
120 +
121 +#include <linux/kernel.h>
122 +#include <linux/module.h>
123 +#include <linux/kthread.h>
124 +#include <linux/fs.h>
125 +#include <linux/device.h>
126 +#include <linux/cdev.h>
127 +#include <linux/mm.h>
128 +#include <linux/proc_fs.h>
129 +#include <linux/seq_file.h>
130 +#include <linux/dma-mapping.h>
131 +#include <linux/dma-contiguous.h>
132 +#include <linux/platform_device.h>
133 +#include <linux/uaccess.h>
134 +#include <asm/cacheflush.h>
135 +
136 +#include "vc_cma.h"
137 +
138 +#include "vchiq_util.h"
139 +#include "vchiq_connected.h"
140 +//#include "debug_sym.h"
141 +//#include "vc_mem.h"
142 +
143 +#define DRIVER_NAME  "vc-cma"
144 +
145 +#define LOG_DBG(fmt, ...) \
146 +       if (vc_cma_debug) \
147 +               printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
148 +#define LOG_INFO(fmt, ...) \
149 +       printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
150 +#define LOG_ERR(fmt, ...) \
151 +       printk(KERN_ERR fmt "\n", ##__VA_ARGS__)
152 +
153 +#define VC_CMA_FOURCC VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
154 +#define VC_CMA_VERSION 2
155 +
156 +#define VC_CMA_CHUNK_ORDER 6   /* 256K */
157 +#define VC_CMA_CHUNK_SIZE (4096 << VC_CMA_CHUNK_ORDER)
158 +#define VC_CMA_MAX_PARAMS_PER_MSG \
159 +       ((VCHIQ_MAX_MSG_SIZE - sizeof(unsigned short))/sizeof(unsigned short))
160 +#define VC_CMA_RESERVE_COUNT_MAX 16
161 +
162 +#define PAGES_PER_CHUNK (VC_CMA_CHUNK_SIZE / PAGE_SIZE)
163 +
164 +#define VCADDR_TO_PHYSADDR(vcaddr) (mm_vc_mem_phys_addr + vcaddr)
165 +
166 +#define loud_error(...) \
167 +       LOG_ERR("===== " __VA_ARGS__)
168 +
169 +enum {
170 +       VC_CMA_MSG_QUIT,
171 +       VC_CMA_MSG_OPEN,
172 +       VC_CMA_MSG_TICK,
173 +       VC_CMA_MSG_ALLOC,       /* chunk count */
174 +       VC_CMA_MSG_FREE,        /* chunk, chunk, ... */
175 +       VC_CMA_MSG_ALLOCATED,   /* chunk, chunk, ... */
176 +       VC_CMA_MSG_REQUEST_ALLOC,       /* chunk count */
177 +       VC_CMA_MSG_REQUEST_FREE,        /* chunk count */
178 +       VC_CMA_MSG_RESERVE,     /* bytes lo, bytes hi */
179 +       VC_CMA_MSG_UPDATE_RESERVE,
180 +       VC_CMA_MSG_MAX
181 +};
182 +
183 +struct cma_msg {
184 +       unsigned short type;
185 +       unsigned short params[VC_CMA_MAX_PARAMS_PER_MSG];
186 +};
187 +
188 +struct vc_cma_reserve_user {
189 +       unsigned int pid;
190 +       unsigned int reserve;
191 +};
192 +
193 +/* Device (/dev) related variables */
194 +static dev_t vc_cma_devnum;
195 +static struct class *vc_cma_class;
196 +static struct cdev vc_cma_cdev;
197 +static int vc_cma_inited;
198 +static int vc_cma_debug;
199 +
200 +/* Proc entry */
201 +static struct proc_dir_entry *vc_cma_proc_entry;
202 +
203 +phys_addr_t vc_cma_base;
204 +struct page *vc_cma_base_page;
205 +unsigned int vc_cma_size;
206 +EXPORT_SYMBOL(vc_cma_size);
207 +unsigned int vc_cma_initial;
208 +unsigned int vc_cma_chunks;
209 +unsigned int vc_cma_chunks_used;
210 +unsigned int vc_cma_chunks_reserved;
211 +
212 +
213 +void *vc_cma_dma_alloc;
214 +unsigned int vc_cma_dma_size;
215 +
216 +static int in_loud_error;
217 +
218 +unsigned int vc_cma_reserve_total;
219 +unsigned int vc_cma_reserve_count;
220 +struct vc_cma_reserve_user vc_cma_reserve_users[VC_CMA_RESERVE_COUNT_MAX];
221 +static DEFINE_SEMAPHORE(vc_cma_reserve_mutex);
222 +static DEFINE_SEMAPHORE(vc_cma_worker_queue_push_mutex);
223 +
224 +static u64 vc_cma_dma_mask = DMA_BIT_MASK(32);
225 +static struct platform_device vc_cma_device = {
226 +       .name = "vc-cma",
227 +       .id = 0,
228 +       .dev = {
229 +               .dma_mask = &vc_cma_dma_mask,
230 +               .coherent_dma_mask = DMA_BIT_MASK(32),
231 +               },
232 +};
233 +
234 +static VCHIQ_INSTANCE_T cma_instance;
235 +static VCHIQ_SERVICE_HANDLE_T cma_service;
236 +static VCHIU_QUEUE_T cma_msg_queue;
237 +static struct task_struct *cma_worker;
238 +
239 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid);
240 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply);
241 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
242 +                                          VCHIQ_HEADER_T * header,
243 +                                          VCHIQ_SERVICE_HANDLE_T service,
244 +                                          void *bulk_userdata);
245 +static void send_vc_msg(unsigned short type,
246 +                       unsigned short param1, unsigned short param2);
247 +static bool send_worker_msg(VCHIQ_HEADER_T * msg);
248 +
249 +static int early_vc_cma_mem(char *p)
250 +{
251 +       unsigned int new_size;
252 +       printk(KERN_NOTICE "early_vc_cma_mem(%s)", p);
253 +       vc_cma_size = memparse(p, &p);
254 +       vc_cma_initial = vc_cma_size;
255 +       if (*p == '/')
256 +               vc_cma_size = memparse(p + 1, &p);
257 +       if (*p == '@')
258 +               vc_cma_base = memparse(p + 1, &p);
259 +
260 +       new_size = (vc_cma_size - ((-vc_cma_base) & (VC_CMA_CHUNK_SIZE - 1)))
261 +           & ~(VC_CMA_CHUNK_SIZE - 1);
262 +       if (new_size > vc_cma_size)
263 +               vc_cma_size = 0;
264 +       vc_cma_initial = (vc_cma_initial + VC_CMA_CHUNK_SIZE - 1)
265 +           & ~(VC_CMA_CHUNK_SIZE - 1);
266 +       if (vc_cma_initial > vc_cma_size)
267 +               vc_cma_initial = vc_cma_size;
268 +       vc_cma_base = (vc_cma_base + VC_CMA_CHUNK_SIZE - 1)
269 +           & ~(VC_CMA_CHUNK_SIZE - 1);
270 +
271 +       printk(KERN_NOTICE " -> initial %x, size %x, base %x", vc_cma_initial,
272 +              vc_cma_size, (unsigned int)vc_cma_base);
273 +
274 +       return 0;
275 +}
276 +
277 +early_param("vc-cma-mem", early_vc_cma_mem);
278 +
279 +void __init vc_cma_early_init(void)
280 +{
281 +       LOG_DBG("vc_cma_early_init - vc_cma_chunks = %d", vc_cma_chunks);
282 +       if (vc_cma_size) {
283 +               int rc = platform_device_register(&vc_cma_device);
284 +               LOG_DBG("platform_device_register -> %d", rc);
285 +       }
286 +}
287 +
288 +void __init vc_cma_reserve(void)
289 +{
290 +       /* if vc_cma_size is set, then declare vc CMA area of the same
291 +        * size from the end of memory
292 +        */
293 +       if (vc_cma_size) {
294 +               if (dma_declare_contiguous(&vc_cma_device.dev, vc_cma_size,
295 +                                          vc_cma_base, 0) == 0) {
296 +                       if (!dev_get_cma_area(NULL)) {
297 +                               /* There is no default CMA area - make this
298 +                                  the default */
299 +                               struct cma *vc_cma_area = dev_get_cma_area(
300 +                                       &vc_cma_device.dev);
301 +                               dma_contiguous_set_default(vc_cma_area);
302 +                               LOG_INFO("vc_cma_reserve - using vc_cma as "
303 +                                        "the default contiguous DMA area");
304 +                       }
305 +               } else {
306 +                       LOG_ERR("vc_cma: dma_declare_contiguous(%x,%x) failed",
307 +                               vc_cma_size, (unsigned int)vc_cma_base);
308 +                       vc_cma_size = 0;
309 +               }
310 +       }
311 +       vc_cma_chunks = vc_cma_size / VC_CMA_CHUNK_SIZE;
312 +}
313 +
314 +/****************************************************************************
315 +*
316 +*   vc_cma_open
317 +*
318 +***************************************************************************/
319 +
320 +static int vc_cma_open(struct inode *inode, struct file *file)
321 +{
322 +       (void)inode;
323 +       (void)file;
324 +
325 +       return 0;
326 +}
327 +
328 +/****************************************************************************
329 +*
330 +*   vc_cma_release
331 +*
332 +***************************************************************************/
333 +
334 +static int vc_cma_release(struct inode *inode, struct file *file)
335 +{
336 +       (void)inode;
337 +       (void)file;
338 +
339 +       vc_cma_set_reserve(0, current->tgid);
340 +
341 +       return 0;
342 +}
343 +
344 +/****************************************************************************
345 +*
346 +*   vc_cma_ioctl
347 +*
348 +***************************************************************************/
349 +
350 +static long vc_cma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
351 +{
352 +       int rc = 0;
353 +
354 +       (void)cmd;
355 +       (void)arg;
356 +
357 +       switch (cmd) {
358 +       case VC_CMA_IOC_RESERVE:
359 +               rc = vc_cma_set_reserve((unsigned int)arg, current->tgid);
360 +               if (rc >= 0)
361 +                       rc = 0;
362 +               break;
363 +       default:
364 +               LOG_ERR("vc-cma: Unknown ioctl %x", cmd);
365 +               return -ENOTTY;
366 +       }
367 +
368 +       return rc;
369 +}
370 +
371 +/****************************************************************************
372 +*
373 +*   File Operations for the driver.
374 +*
375 +***************************************************************************/
376 +
377 +static const struct file_operations vc_cma_fops = {
378 +       .owner = THIS_MODULE,
379 +       .open = vc_cma_open,
380 +       .release = vc_cma_release,
381 +       .unlocked_ioctl = vc_cma_ioctl,
382 +};
383 +
384 +/****************************************************************************
385 +*
386 +*   vc_cma_proc_open
387 +*
388 +***************************************************************************/
389 +
390 +static int vc_cma_show_info(struct seq_file *m, void *v)
391 +{
392 +       int i;
393 +
394 +       seq_printf(m, "Videocore CMA:\n");
395 +       seq_printf(m, "   Base       : %08x\n", (unsigned int)vc_cma_base);
396 +       seq_printf(m, "   Length     : %08x\n", vc_cma_size);
397 +       seq_printf(m, "   Initial    : %08x\n", vc_cma_initial);
398 +       seq_printf(m, "   Chunk size : %08x\n", VC_CMA_CHUNK_SIZE);
399 +       seq_printf(m, "   Chunks     : %4d (%d bytes)\n",
400 +                  (int)vc_cma_chunks,
401 +                  (int)(vc_cma_chunks * VC_CMA_CHUNK_SIZE));
402 +       seq_printf(m, "   Used       : %4d (%d bytes)\n",
403 +                  (int)vc_cma_chunks_used,
404 +                  (int)(vc_cma_chunks_used * VC_CMA_CHUNK_SIZE));
405 +       seq_printf(m, "   Reserved   : %4d (%d bytes)\n",
406 +                  (unsigned int)vc_cma_chunks_reserved,
407 +                  (int)(vc_cma_chunks_reserved * VC_CMA_CHUNK_SIZE));
408 +
409 +       for (i = 0; i < vc_cma_reserve_count; i++) {
410 +               struct vc_cma_reserve_user *user = &vc_cma_reserve_users[i];
411 +               seq_printf(m, "     PID %5d: %d bytes\n", user->pid,
412 +                          user->reserve);
413 +       }
414 +       seq_printf(m, "   dma_alloc  : %p (%d pages)\n",
415 +                  vc_cma_dma_alloc ? page_address(vc_cma_dma_alloc) : 0,
416 +                  vc_cma_dma_size);
417 +
418 +       seq_printf(m, "\n");
419 +
420 +       return 0;
421 +}
422 +
423 +static int vc_cma_proc_open(struct inode *inode, struct file *file)
424 +{
425 +       return single_open(file, vc_cma_show_info, NULL);
426 +}
427 +
428 +/****************************************************************************
429 +*
430 +*   vc_cma_proc_write
431 +*
432 +***************************************************************************/
433 +
434 +static int vc_cma_proc_write(struct file *file,
435 +                            const char __user *buffer,
436 +                            size_t size, loff_t *ppos)
437 +{
438 +       int rc = -EFAULT;
439 +       char input_str[20];
440 +
441 +       memset(input_str, 0, sizeof(input_str));
442 +
443 +       if (size > sizeof(input_str)) {
444 +               LOG_ERR("%s: input string length too long", __func__);
445 +               goto out;
446 +       }
447 +
448 +       if (copy_from_user(input_str, buffer, size - 1)) {
449 +               LOG_ERR("%s: failed to get input string", __func__);
450 +               goto out;
451 +       }
452 +#define ALLOC_STR "alloc"
453 +#define FREE_STR "free"
454 +#define DEBUG_STR "debug"
455 +#define RESERVE_STR "reserve"
456 +#define DMA_ALLOC_STR "dma_alloc"
457 +#define DMA_FREE_STR "dma_free"
458 +       if (strncmp(input_str, ALLOC_STR, strlen(ALLOC_STR)) == 0) {
459 +               int alloc_size;
460 +               char *p = input_str + strlen(ALLOC_STR);
461 +
462 +               while (*p == ' ')
463 +                       p++;
464 +               alloc_size = memparse(p, NULL);
465 +               LOG_INFO("/proc/vc-cma: alloc %d", alloc_size);
466 +               if (alloc_size)
467 +                       send_vc_msg(VC_CMA_MSG_REQUEST_FREE,
468 +                                   alloc_size / VC_CMA_CHUNK_SIZE, 0);
469 +               else
470 +                       LOG_ERR("invalid size '%s'", p);
471 +               rc = size;
472 +       } else if (strncmp(input_str, FREE_STR, strlen(FREE_STR)) == 0) {
473 +               int alloc_size;
474 +               char *p = input_str + strlen(FREE_STR);
475 +
476 +               while (*p == ' ')
477 +                       p++;
478 +               alloc_size = memparse(p, NULL);
479 +               LOG_INFO("/proc/vc-cma: free %d", alloc_size);
480 +               if (alloc_size)
481 +                       send_vc_msg(VC_CMA_MSG_REQUEST_ALLOC,
482 +                                   alloc_size / VC_CMA_CHUNK_SIZE, 0);
483 +               else
484 +                       LOG_ERR("invalid size '%s'", p);
485 +               rc = size;
486 +       } else if (strncmp(input_str, DEBUG_STR, strlen(DEBUG_STR)) == 0) {
487 +               char *p = input_str + strlen(DEBUG_STR);
488 +               while (*p == ' ')
489 +                       p++;
490 +               if ((strcmp(p, "on") == 0) || (strcmp(p, "1") == 0))
491 +                       vc_cma_debug = 1;
492 +               else if ((strcmp(p, "off") == 0) || (strcmp(p, "0") == 0))
493 +                       vc_cma_debug = 0;
494 +               LOG_INFO("/proc/vc-cma: debug %s", vc_cma_debug ? "on" : "off");
495 +               rc = size;
496 +       } else if (strncmp(input_str, RESERVE_STR, strlen(RESERVE_STR)) == 0) {
497 +               int alloc_size;
498 +               int reserved;
499 +               char *p = input_str + strlen(RESERVE_STR);
500 +               while (*p == ' ')
501 +                       p++;
502 +               alloc_size = memparse(p, NULL);
503 +
504 +               reserved = vc_cma_set_reserve(alloc_size, current->tgid);
505 +               rc = (reserved >= 0) ? size : reserved;
506 +       } else if (strncmp(input_str, DMA_ALLOC_STR, strlen(DMA_ALLOC_STR)) == 0) {
507 +               int alloc_size;
508 +               char *p = input_str + strlen(DMA_ALLOC_STR);
509 +               while (*p == ' ')
510 +                       p++;
511 +               alloc_size = memparse(p, NULL);
512 +
513 +               if (vc_cma_dma_alloc) {
514 +                   dma_release_from_contiguous(NULL, vc_cma_dma_alloc,
515 +                                               vc_cma_dma_size);
516 +                   vc_cma_dma_alloc = NULL;
517 +                   vc_cma_dma_size = 0;
518 +               }
519 +               vc_cma_dma_alloc = dma_alloc_from_contiguous(NULL, alloc_size, 0);
520 +               vc_cma_dma_size = (vc_cma_dma_alloc ? alloc_size : 0);
521 +               if (vc_cma_dma_alloc)
522 +                       LOG_INFO("dma_alloc(%d pages) -> %p", alloc_size, page_address(vc_cma_dma_alloc));
523 +               else
524 +                       LOG_ERR("dma_alloc(%d pages) failed", alloc_size);
525 +               rc = size;
526 +       } else if (strncmp(input_str, DMA_FREE_STR, strlen(DMA_FREE_STR)) == 0) {
527 +               if (vc_cma_dma_alloc) {
528 +                   dma_release_from_contiguous(NULL, vc_cma_dma_alloc,
529 +                                               vc_cma_dma_size);
530 +                   vc_cma_dma_alloc = NULL;
531 +                   vc_cma_dma_size = 0;
532 +               }
533 +               rc = size;
534 +       }
535 +
536 +out:
537 +       return rc;
538 +}
539 +
540 +/****************************************************************************
541 +*
542 +*   File Operations for /proc interface.
543 +*
544 +***************************************************************************/
545 +
546 +static const struct file_operations vc_cma_proc_fops = {
547 +       .open = vc_cma_proc_open,
548 +       .read = seq_read,
549 +       .write = vc_cma_proc_write,
550 +       .llseek = seq_lseek,
551 +       .release = single_release
552 +};
553 +
554 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid)
555 +{
556 +       struct vc_cma_reserve_user *user = NULL;
557 +       int delta = 0;
558 +       int i;
559 +
560 +       if (down_interruptible(&vc_cma_reserve_mutex))
561 +               return -ERESTARTSYS;
562 +
563 +       for (i = 0; i < vc_cma_reserve_count; i++) {
564 +               if (pid == vc_cma_reserve_users[i].pid) {
565 +                       user = &vc_cma_reserve_users[i];
566 +                       delta = reserve - user->reserve;
567 +                       if (reserve)
568 +                               user->reserve = reserve;
569 +                       else {
570 +                               /* Remove this entry by copying downwards */
571 +                               while ((i + 1) < vc_cma_reserve_count) {
572 +                                       user[0].pid = user[1].pid;
573 +                                       user[0].reserve = user[1].reserve;
574 +                                       user++;
575 +                                       i++;
576 +                               }
577 +                               vc_cma_reserve_count--;
578 +                               user = NULL;
579 +                       }
580 +                       break;
581 +               }
582 +       }
583 +
584 +       if (reserve && !user) {
585 +               if (vc_cma_reserve_count == VC_CMA_RESERVE_COUNT_MAX) {
586 +                       LOG_ERR("vc-cma: Too many reservations - "
587 +                               "increase CMA_RESERVE_COUNT_MAX");
588 +                       up(&vc_cma_reserve_mutex);
589 +                       return -EBUSY;
590 +               }
591 +               user = &vc_cma_reserve_users[vc_cma_reserve_count];
592 +               user->pid = pid;
593 +               user->reserve = reserve;
594 +               delta = reserve;
595 +               vc_cma_reserve_count++;
596 +       }
597 +
598 +       vc_cma_reserve_total += delta;
599 +
600 +       send_vc_msg(VC_CMA_MSG_RESERVE,
601 +                   vc_cma_reserve_total & 0xffff, vc_cma_reserve_total >> 16);
602 +
603 +       send_worker_msg((VCHIQ_HEADER_T *) VC_CMA_MSG_UPDATE_RESERVE);
604 +
605 +       LOG_DBG("/proc/vc-cma: reserve %d (PID %d) - total %u",
606 +               reserve, pid, vc_cma_reserve_total);
607 +
608 +       up(&vc_cma_reserve_mutex);
609 +
610 +       return vc_cma_reserve_total;
611 +}
612 +
613 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
614 +                                          VCHIQ_HEADER_T * header,
615 +                                          VCHIQ_SERVICE_HANDLE_T service,
616 +                                          void *bulk_userdata)
617 +{
618 +       switch (reason) {
619 +       case VCHIQ_MESSAGE_AVAILABLE:
620 +               if (!send_worker_msg(header))
621 +                       return VCHIQ_RETRY;
622 +               break;
623 +       case VCHIQ_SERVICE_CLOSED:
624 +               LOG_DBG("CMA service closed");
625 +               break;
626 +       default:
627 +               LOG_ERR("Unexpected CMA callback reason %d", reason);
628 +               break;
629 +       }
630 +       return VCHIQ_SUCCESS;
631 +}
632 +
633 +static void send_vc_msg(unsigned short type,
634 +                       unsigned short param1, unsigned short param2)
635 +{
636 +       unsigned short msg[] = { type, param1, param2 };
637 +       VCHIQ_ELEMENT_T elem = { &msg, sizeof(msg) };
638 +       VCHIQ_STATUS_T ret;
639 +       vchiq_use_service(cma_service);
640 +       ret = vchiq_queue_message(cma_service, &elem, 1);
641 +       vchiq_release_service(cma_service);
642 +       if (ret != VCHIQ_SUCCESS)
643 +               LOG_ERR("vchiq_queue_message returned %x", ret);
644 +}
645 +
646 +static bool send_worker_msg(VCHIQ_HEADER_T * msg)
647 +{
648 +       if (down_interruptible(&vc_cma_worker_queue_push_mutex))
649 +               return false;
650 +       vchiu_queue_push(&cma_msg_queue, msg);
651 +       up(&vc_cma_worker_queue_push_mutex);
652 +       return true;
653 +}
654 +
655 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply)
656 +{
657 +       int i;
658 +       for (i = 0; i < num_chunks; i++) {
659 +               struct page *chunk;
660 +               unsigned int chunk_num;
661 +               uint8_t *chunk_addr;
662 +               size_t chunk_size = PAGES_PER_CHUNK << PAGE_SHIFT;
663 +
664 +               chunk = dma_alloc_from_contiguous(&vc_cma_device.dev,
665 +                                                 PAGES_PER_CHUNK,
666 +                                                 VC_CMA_CHUNK_ORDER);
667 +               if (!chunk)
668 +                       break;
669 +
670 +               chunk_addr = page_address(chunk);
671 +               dmac_flush_range(chunk_addr, chunk_addr + chunk_size);
672 +               outer_inv_range(__pa(chunk_addr), __pa(chunk_addr) +
673 +                       chunk_size);
674 +
675 +               chunk_num =
676 +                   (page_to_phys(chunk) - vc_cma_base) / VC_CMA_CHUNK_SIZE;
677 +               BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
678 +                       VC_CMA_CHUNK_SIZE) != 0);
679 +               if (chunk_num >= vc_cma_chunks) {
680 +                       phys_addr_t _pa = vc_cma_base + vc_cma_size - 1;
681 +                       LOG_ERR("%s: ===============================",
682 +                               __func__);
683 +                       LOG_ERR("%s: chunk phys %x, vc_cma %pa-%pa - "
684 +                               "bad SPARSEMEM configuration?",
685 +                               __func__, (unsigned int)page_to_phys(chunk),
686 +                               &vc_cma_base, &_pa);
687 +                       LOG_ERR("%s: dev->cma_area = %p", __func__,
688 +                               (void*)0/*vc_cma_device.dev.cma_area*/);
689 +                       LOG_ERR("%s: ===============================",
690 +                               __func__);
691 +                       break;
692 +               }
693 +               reply->params[i] = chunk_num;
694 +               vc_cma_chunks_used++;
695 +       }
696 +
697 +       if (i < num_chunks) {
698 +               LOG_ERR("%s: dma_alloc_from_contiguous failed "
699 +                       "for %x bytes (alloc %d of %d, %d free)",
700 +                       __func__, VC_CMA_CHUNK_SIZE, i,
701 +                       num_chunks, vc_cma_chunks - vc_cma_chunks_used);
702 +               num_chunks = i;
703 +       }
704 +
705 +       LOG_DBG("CMA allocated %d chunks -> %d used",
706 +               num_chunks, vc_cma_chunks_used);
707 +       reply->type = VC_CMA_MSG_ALLOCATED;
708 +
709 +       {
710 +               VCHIQ_ELEMENT_T elem = {
711 +                       reply,
712 +                       offsetof(struct cma_msg, params[0]) +
713 +                           num_chunks * sizeof(reply->params[0])
714 +               };
715 +               VCHIQ_STATUS_T ret;
716 +               vchiq_use_service(cma_service);
717 +               ret = vchiq_queue_message(cma_service, &elem, 1);
718 +               vchiq_release_service(cma_service);
719 +               if (ret != VCHIQ_SUCCESS)
720 +                       LOG_ERR("vchiq_queue_message return " "%x", ret);
721 +       }
722 +
723 +       return num_chunks;
724 +}
725 +
726 +static int cma_worker_proc(void *param)
727 +{
728 +       static struct cma_msg reply;
729 +       (void)param;
730 +
731 +       while (1) {
732 +               VCHIQ_HEADER_T *msg;
733 +               static struct cma_msg msg_copy;
734 +               struct cma_msg *cma_msg = &msg_copy;
735 +               int type, msg_size;
736 +
737 +               msg = vchiu_queue_pop(&cma_msg_queue);
738 +               if ((unsigned int)msg >= VC_CMA_MSG_MAX) {
739 +                       msg_size = msg->size;
740 +                       memcpy(&msg_copy, msg->data, msg_size);
741 +                       type = cma_msg->type;
742 +                       vchiq_release_message(cma_service, msg);
743 +               } else {
744 +                       msg_size = 0;
745 +                       type = (int)msg;
746 +                       if (type == VC_CMA_MSG_QUIT)
747 +                               break;
748 +                       else if (type == VC_CMA_MSG_UPDATE_RESERVE) {
749 +                               msg = NULL;
750 +                               cma_msg = NULL;
751 +                       } else {
752 +                               BUG();
753 +                               continue;
754 +                       }
755 +               }
756 +
757 +               switch (type) {
758 +               case VC_CMA_MSG_ALLOC:{
759 +                               int num_chunks, free_chunks;
760 +                               num_chunks = cma_msg->params[0];
761 +                               free_chunks =
762 +                                   vc_cma_chunks - vc_cma_chunks_used;
763 +                               LOG_DBG("CMA_MSG_ALLOC(%d chunks)", num_chunks);
764 +                               if (num_chunks > VC_CMA_MAX_PARAMS_PER_MSG) {
765 +                                       LOG_ERR
766 +                                           ("CMA_MSG_ALLOC - chunk count (%d) "
767 +                                            "exceeds VC_CMA_MAX_PARAMS_PER_MSG (%d)",
768 +                                            num_chunks,
769 +                                            VC_CMA_MAX_PARAMS_PER_MSG);
770 +                                       num_chunks = VC_CMA_MAX_PARAMS_PER_MSG;
771 +                               }
772 +
773 +                               if (num_chunks > free_chunks) {
774 +                                       LOG_ERR
775 +                                           ("CMA_MSG_ALLOC - chunk count (%d) "
776 +                                            "exceeds free chunks (%d)",
777 +                                            num_chunks, free_chunks);
778 +                                       num_chunks = free_chunks;
779 +                               }
780 +
781 +                               vc_cma_alloc_chunks(num_chunks, &reply);
782 +                       }
783 +                       break;
784 +
785 +               case VC_CMA_MSG_FREE:{
786 +                               int chunk_count =
787 +                                   (msg_size -
788 +                                    offsetof(struct cma_msg,
789 +                                             params)) /
790 +                                   sizeof(cma_msg->params[0]);
791 +                               int i;
792 +                               BUG_ON(chunk_count <= 0);
793 +
794 +                               LOG_DBG("CMA_MSG_FREE(%d chunks - %x, ...)",
795 +                                       chunk_count, cma_msg->params[0]);
796 +                               for (i = 0; i < chunk_count; i++) {
797 +                                       int chunk_num = cma_msg->params[i];
798 +                                       struct page *page = vc_cma_base_page +
799 +                                           chunk_num * PAGES_PER_CHUNK;
800 +                                       if (chunk_num >= vc_cma_chunks) {
801 +                                               LOG_ERR
802 +                                                   ("CMA_MSG_FREE - chunk %d of %d"
803 +                                                    " (value %x) exceeds maximum "
804 +                                                    "(%x)", i, chunk_count,
805 +                                                    chunk_num,
806 +                                                    vc_cma_chunks - 1);
807 +                                               break;
808 +                                       }
809 +
810 +                                       if (!dma_release_from_contiguous
811 +                                           (&vc_cma_device.dev, page,
812 +                                            PAGES_PER_CHUNK)) {
813 +                                               phys_addr_t _pa = page_to_phys(page);
814 +                                               LOG_ERR
815 +                                                   ("CMA_MSG_FREE - failed to "
816 +                                                    "release chunk %d (phys %pa, "
817 +                                                    "page %x)", chunk_num,
818 +                                                    &_pa,
819 +                                                    (unsigned int)page);
820 +                                       }
821 +                                       vc_cma_chunks_used--;
822 +                               }
823 +                               LOG_DBG("CMA released %d chunks -> %d used",
824 +                                       i, vc_cma_chunks_used);
825 +                       }
826 +                       break;
827 +
828 +               case VC_CMA_MSG_UPDATE_RESERVE:{
829 +                               int chunks_needed =
830 +                                   ((vc_cma_reserve_total + VC_CMA_CHUNK_SIZE -
831 +                                     1)
832 +                                    / VC_CMA_CHUNK_SIZE) -
833 +                                   vc_cma_chunks_reserved;
834 +
835 +                               LOG_DBG
836 +                                   ("CMA_MSG_UPDATE_RESERVE(%d chunks needed)",
837 +                                    chunks_needed);
838 +
839 +                               /* Cap the reservations to what is available */
840 +                               if (chunks_needed > 0) {
841 +                                       if (chunks_needed >
842 +                                           (vc_cma_chunks -
843 +                                            vc_cma_chunks_used))
844 +                                               chunks_needed =
845 +                                                   (vc_cma_chunks -
846 +                                                    vc_cma_chunks_used);
847 +
848 +                                       chunks_needed =
849 +                                           vc_cma_alloc_chunks(chunks_needed,
850 +                                                               &reply);
851 +                               }
852 +
853 +                               LOG_DBG
854 +                                   ("CMA_MSG_UPDATE_RESERVE(%d chunks allocated)",
855 +                                    chunks_needed);
856 +                               vc_cma_chunks_reserved += chunks_needed;
857 +                       }
858 +                       break;
859 +
860 +               default:
861 +                       LOG_ERR("unexpected msg type %d", type);
862 +                       break;
863 +               }
864 +       }
865 +
866 +       LOG_DBG("quitting...");
867 +       return 0;
868 +}
869 +
870 +/****************************************************************************
871 +*
872 +*   vc_cma_connected_init
873 +*
874 +*   This function is called once the videocore has been connected.
875 +*
876 +***************************************************************************/
877 +
878 +static void vc_cma_connected_init(void)
879 +{
880 +       VCHIQ_SERVICE_PARAMS_T service_params;
881 +
882 +       LOG_DBG("vc_cma_connected_init");
883 +
884 +       if (!vchiu_queue_init(&cma_msg_queue, 16)) {
885 +               LOG_ERR("could not create CMA msg queue");
886 +               goto fail_queue;
887 +       }
888 +
889 +       if (vchiq_initialise(&cma_instance) != VCHIQ_SUCCESS)
890 +               goto fail_vchiq_init;
891 +
892 +       vchiq_connect(cma_instance);
893 +
894 +       service_params.fourcc = VC_CMA_FOURCC;
895 +       service_params.callback = cma_service_callback;
896 +       service_params.userdata = NULL;
897 +       service_params.version = VC_CMA_VERSION;
898 +       service_params.version_min = VC_CMA_VERSION;
899 +
900 +       if (vchiq_open_service(cma_instance, &service_params,
901 +                              &cma_service) != VCHIQ_SUCCESS) {
902 +               LOG_ERR("failed to open service - already in use?");
903 +               goto fail_vchiq_open;
904 +       }
905 +
906 +       vchiq_release_service(cma_service);
907 +
908 +       cma_worker = kthread_create(cma_worker_proc, NULL, "cma_worker");
909 +       if (!cma_worker) {
910 +               LOG_ERR("could not create CMA worker thread");
911 +               goto fail_worker;
912 +       }
913 +       set_user_nice(cma_worker, -20);
914 +       wake_up_process(cma_worker);
915 +
916 +       return;
917 +
918 +fail_worker:
919 +       vchiq_close_service(cma_service);
920 +fail_vchiq_open:
921 +       vchiq_shutdown(cma_instance);
922 +fail_vchiq_init:
923 +       vchiu_queue_delete(&cma_msg_queue);
924 +fail_queue:
925 +       return;
926 +}
927 +
928 +void
929 +loud_error_header(void)
930 +{
931 +       if (in_loud_error)
932 +               return;
933 +
934 +       LOG_ERR("============================================================"
935 +               "================");
936 +       LOG_ERR("============================================================"
937 +               "================");
938 +       LOG_ERR("=====");
939 +
940 +       in_loud_error = 1;
941 +}
942 +
943 +void
944 +loud_error_footer(void)
945 +{
946 +       if (!in_loud_error)
947 +               return;
948 +
949 +       LOG_ERR("=====");
950 +       LOG_ERR("============================================================"
951 +               "================");
952 +       LOG_ERR("============================================================"
953 +               "================");
954 +
955 +       in_loud_error = 0;
956 +}
957 +
958 +#if 1
959 +static int check_cma_config(void) { return 1; }
960 +#else
961 +static int
962 +read_vc_debug_var(VC_MEM_ACCESS_HANDLE_T handle,
963 +       const char *symbol,
964 +       void *buf, size_t bufsize)
965 +{
966 +       VC_MEM_ADDR_T vcMemAddr;
967 +       size_t vcMemSize;
968 +       uint8_t *mapAddr;
969 +       off_t  vcMapAddr;
970 +
971 +       if (!LookupVideoCoreSymbol(handle, symbol,
972 +               &vcMemAddr,
973 +               &vcMemSize)) {
974 +               loud_error_header();
975 +               loud_error(
976 +                       "failed to find VC symbol \"%s\".",
977 +                       symbol);
978 +               loud_error_footer();
979 +               return 0;
980 +       }
981 +
982 +       if (vcMemSize != bufsize) {
983 +               loud_error_header();
984 +               loud_error(
985 +                       "VC symbol \"%s\" is the wrong size.",
986 +                       symbol);
987 +               loud_error_footer();
988 +               return 0;
989 +       }
990 +
991 +       vcMapAddr = (off_t)vcMemAddr & VC_MEM_TO_ARM_ADDR_MASK;
992 +       vcMapAddr += mm_vc_mem_phys_addr;
993 +       mapAddr = ioremap_nocache(vcMapAddr, vcMemSize);
994 +       if (mapAddr == 0) {
995 +               loud_error_header();
996 +               loud_error(
997 +                       "failed to ioremap \"%s\" @ 0x%x "
998 +                       "(phys: 0x%x, size: %u).",
999 +                       symbol,
1000 +                       (unsigned int)vcMapAddr,
1001 +                       (unsigned int)vcMemAddr,
1002 +                       (unsigned int)vcMemSize);
1003 +               loud_error_footer();
1004 +               return 0;
1005 +       }
1006 +
1007 +       memcpy(buf, mapAddr, bufsize);
1008 +       iounmap(mapAddr);
1009 +
1010 +       return 1;
1011 +}
1012 +
1013 +
1014 +static int
1015 +check_cma_config(void)
1016 +{
1017 +       VC_MEM_ACCESS_HANDLE_T mem_hndl;
1018 +       VC_MEM_ADDR_T mempool_start;
1019 +       VC_MEM_ADDR_T mempool_end;
1020 +       VC_MEM_ADDR_T mempool_offline_start;
1021 +       VC_MEM_ADDR_T mempool_offline_end;
1022 +       VC_MEM_ADDR_T cam_alloc_base;
1023 +       VC_MEM_ADDR_T cam_alloc_size;
1024 +       VC_MEM_ADDR_T cam_alloc_end;
1025 +       int success = 0;
1026 +
1027 +       if (OpenVideoCoreMemory(&mem_hndl) != 0)
1028 +               goto out;
1029 +
1030 +       /* Read the relevant VideoCore variables */
1031 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_START",
1032 +               &mempool_start,
1033 +               sizeof(mempool_start)))
1034 +               goto close;
1035 +
1036 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_END",
1037 +               &mempool_end,
1038 +               sizeof(mempool_end)))
1039 +               goto close;
1040 +
1041 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_START",
1042 +               &mempool_offline_start,
1043 +               sizeof(mempool_offline_start)))
1044 +               goto close;
1045 +
1046 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_END",
1047 +               &mempool_offline_end,
1048 +               sizeof(mempool_offline_end)))
1049 +               goto close;
1050 +
1051 +       if (!read_vc_debug_var(mem_hndl, "cam_alloc_base",
1052 +               &cam_alloc_base,
1053 +               sizeof(cam_alloc_base)))
1054 +               goto close;
1055 +
1056 +       if (!read_vc_debug_var(mem_hndl, "cam_alloc_size",
1057 +               &cam_alloc_size,
1058 +               sizeof(cam_alloc_size)))
1059 +               goto close;
1060 +
1061 +       cam_alloc_end = cam_alloc_base + cam_alloc_size;
1062 +
1063 +       success = 1;
1064 +
1065 +       /* Now the sanity checks */
1066 +       if (!mempool_offline_start)
1067 +               mempool_offline_start = mempool_start;
1068 +       if (!mempool_offline_end)
1069 +               mempool_offline_end = mempool_end;
1070 +
1071 +       if (VCADDR_TO_PHYSADDR(mempool_offline_start) != vc_cma_base) {
1072 +               loud_error_header();
1073 +               loud_error(
1074 +                       "__MEMPOOL_OFFLINE_START(%x -> %lx) doesn't match "
1075 +                       "vc_cma_base(%x)",
1076 +                       mempool_offline_start,
1077 +                       VCADDR_TO_PHYSADDR(mempool_offline_start),
1078 +                       vc_cma_base);
1079 +               success = 0;
1080 +       }
1081 +
1082 +       if (VCADDR_TO_PHYSADDR(mempool_offline_end) !=
1083 +               (vc_cma_base + vc_cma_size)) {
1084 +               loud_error_header();
1085 +               loud_error(
1086 +                       "__MEMPOOL_OFFLINE_END(%x -> %lx) doesn't match "
1087 +                       "vc_cma_base(%x) + vc_cma_size(%x) = %x",
1088 +                       mempool_offline_start,
1089 +                       VCADDR_TO_PHYSADDR(mempool_offline_end),
1090 +                       vc_cma_base, vc_cma_size, vc_cma_base + vc_cma_size);
1091 +               success = 0;
1092 +       }
1093 +
1094 +       if (mempool_end < mempool_start) {
1095 +               loud_error_header();
1096 +               loud_error(
1097 +                       "__MEMPOOL_END(%x) must not be before "
1098 +                       "__MEMPOOL_START(%x)",
1099 +                       mempool_end,
1100 +                       mempool_start);
1101 +               success = 0;
1102 +       }
1103 +
1104 +       if (mempool_offline_end < mempool_offline_start) {
1105 +               loud_error_header();
1106 +               loud_error(
1107 +                       "__MEMPOOL_OFFLINE_END(%x) must not be before "
1108 +                       "__MEMPOOL_OFFLINE_START(%x)",
1109 +                       mempool_offline_end,
1110 +                       mempool_offline_start);
1111 +               success = 0;
1112 +       }
1113 +
1114 +       if (mempool_offline_start < mempool_start) {
1115 +               loud_error_header();
1116 +               loud_error(
1117 +                       "__MEMPOOL_OFFLINE_START(%x) must not be before "
1118 +                       "__MEMPOOL_START(%x)",
1119 +                       mempool_offline_start,
1120 +                       mempool_start);
1121 +               success = 0;
1122 +       }
1123 +
1124 +       if (mempool_offline_end > mempool_end) {
1125 +               loud_error_header();
1126 +               loud_error(
1127 +                       "__MEMPOOL_OFFLINE_END(%x) must not be after "
1128 +                       "__MEMPOOL_END(%x)",
1129 +                       mempool_offline_end,
1130 +                       mempool_end);
1131 +               success = 0;
1132 +       }
1133 +
1134 +       if ((cam_alloc_base < mempool_end) &&
1135 +               (cam_alloc_end > mempool_start)) {
1136 +               loud_error_header();
1137 +               loud_error(
1138 +                       "cam_alloc pool(%x-%x) overlaps "
1139 +                       "mempool(%x-%x)",
1140 +                       cam_alloc_base, cam_alloc_end,
1141 +                       mempool_start, mempool_end);
1142 +               success = 0;
1143 +       }
1144 +
1145 +       loud_error_footer();
1146 +
1147 +close:
1148 +       CloseVideoCoreMemory(mem_hndl);
1149 +
1150 +out:
1151 +       return success;
1152 +}
1153 +#endif
1154 +
1155 +static int vc_cma_init(void)
1156 +{
1157 +       int rc = -EFAULT;
1158 +       struct device *dev;
1159 +
1160 +       if (!check_cma_config())
1161 +               goto out_release;
1162 +
1163 +       LOG_INFO("vc-cma: Videocore CMA driver");
1164 +       LOG_INFO("vc-cma: vc_cma_base      = %pa", &vc_cma_base);
1165 +       LOG_INFO("vc-cma: vc_cma_size      = 0x%08x (%u MiB)",
1166 +                vc_cma_size, vc_cma_size / (1024 * 1024));
1167 +       LOG_INFO("vc-cma: vc_cma_initial   = 0x%08x (%u MiB)",
1168 +                vc_cma_initial, vc_cma_initial / (1024 * 1024));
1169 +
1170 +       vc_cma_base_page = phys_to_page(vc_cma_base);
1171 +
1172 +       if (vc_cma_chunks) {
1173 +               int chunks_needed = vc_cma_initial / VC_CMA_CHUNK_SIZE;
1174 +
1175 +               for (vc_cma_chunks_used = 0;
1176 +                    vc_cma_chunks_used < chunks_needed; vc_cma_chunks_used++) {
1177 +                       struct page *chunk;
1178 +                       chunk = dma_alloc_from_contiguous(&vc_cma_device.dev,
1179 +                                                         PAGES_PER_CHUNK,
1180 +                                                         VC_CMA_CHUNK_ORDER);
1181 +                       if (!chunk)
1182 +                               break;
1183 +                       BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
1184 +                               VC_CMA_CHUNK_SIZE) != 0);
1185 +               }
1186 +               if (vc_cma_chunks_used != chunks_needed) {
1187 +                       LOG_ERR("%s: dma_alloc_from_contiguous failed (%d "
1188 +                               "bytes, allocation %d of %d)",
1189 +                               __func__, VC_CMA_CHUNK_SIZE,
1190 +                               vc_cma_chunks_used, chunks_needed);
1191 +                       goto out_release;
1192 +               }
1193 +
1194 +               vchiq_add_connected_callback(vc_cma_connected_init);
1195 +       }
1196 +
1197 +       rc = alloc_chrdev_region(&vc_cma_devnum, 0, 1, DRIVER_NAME);
1198 +       if (rc < 0) {
1199 +               LOG_ERR("%s: alloc_chrdev_region failed (rc=%d)", __func__, rc);
1200 +               goto out_release;
1201 +       }
1202 +
1203 +       cdev_init(&vc_cma_cdev, &vc_cma_fops);
1204 +       rc = cdev_add(&vc_cma_cdev, vc_cma_devnum, 1);
1205 +       if (rc != 0) {
1206 +               LOG_ERR("%s: cdev_add failed (rc=%d)", __func__, rc);
1207 +               goto out_unregister;
1208 +       }
1209 +
1210 +       vc_cma_class = class_create(THIS_MODULE, DRIVER_NAME);
1211 +       if (IS_ERR(vc_cma_class)) {
1212 +               rc = PTR_ERR(vc_cma_class);
1213 +               LOG_ERR("%s: class_create failed (rc=%d)", __func__, rc);
1214 +               goto out_cdev_del;
1215 +       }
1216 +
1217 +       dev = device_create(vc_cma_class, NULL, vc_cma_devnum, NULL,
1218 +                           DRIVER_NAME);
1219 +       if (IS_ERR(dev)) {
1220 +               rc = PTR_ERR(dev);
1221 +               LOG_ERR("%s: device_create failed (rc=%d)", __func__, rc);
1222 +               goto out_class_destroy;
1223 +       }
1224 +
1225 +       vc_cma_proc_entry = proc_create(DRIVER_NAME, 0444, NULL, &vc_cma_proc_fops);
1226 +       if (vc_cma_proc_entry == NULL) {
1227 +               rc = -EFAULT;
1228 +               LOG_ERR("%s: proc_create failed", __func__);
1229 +               goto out_device_destroy;
1230 +       }
1231 +
1232 +       vc_cma_inited = 1;
1233 +       return 0;
1234 +
1235 +out_device_destroy:
1236 +       device_destroy(vc_cma_class, vc_cma_devnum);
1237 +
1238 +out_class_destroy:
1239 +       class_destroy(vc_cma_class);
1240 +       vc_cma_class = NULL;
1241 +
1242 +out_cdev_del:
1243 +       cdev_del(&vc_cma_cdev);
1244 +
1245 +out_unregister:
1246 +       unregister_chrdev_region(vc_cma_devnum, 1);
1247 +
1248 +out_release:
1249 +       /* It is tempting to try to clean up by calling
1250 +          dma_release_from_contiguous for all allocated chunks, but it isn't
1251 +          a very safe thing to do. If vc_cma_initial is non-zero it is because
1252 +          VideoCore is already using that memory, so giving it back to Linux
1253 +          is likely to be fatal.
1254 +        */
1255 +       return -1;
1256 +}
1257 +
1258 +/****************************************************************************
1259 +*
1260 +*   vc_cma_exit
1261 +*
1262 +***************************************************************************/
1263 +
1264 +static void __exit vc_cma_exit(void)
1265 +{
1266 +       LOG_DBG("%s: called", __func__);
1267 +
1268 +       if (vc_cma_inited) {
1269 +               remove_proc_entry(DRIVER_NAME, NULL);
1270 +               device_destroy(vc_cma_class, vc_cma_devnum);
1271 +               class_destroy(vc_cma_class);
1272 +               cdev_del(&vc_cma_cdev);
1273 +               unregister_chrdev_region(vc_cma_devnum, 1);
1274 +       }
1275 +}
1276 +
1277 +module_init(vc_cma_init);
1278 +module_exit(vc_cma_exit);
1279 +MODULE_LICENSE("GPL");
1280 +MODULE_AUTHOR("Broadcom Corporation");
1281 --- /dev/null
1282 +++ b/include/linux/broadcom/vc_cma.h
1283 @@ -0,0 +1,36 @@
1284 +/*****************************************************************************
1285 +* Copyright 2012 Broadcom Corporation.  All rights reserved.
1286 +*
1287 +* Unless you and Broadcom execute a separate written software license
1288 +* agreement governing use of this software, this software is licensed to you
1289 +* under the terms of the GNU General Public License version 2, available at
1290 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1291 +*
1292 +* Notwithstanding the above, under no circumstances may you combine this
1293 +* software in any way with any other Broadcom software provided under a
1294 +* license other than the GPL, without Broadcom's express prior written
1295 +* consent.
1296 +*****************************************************************************/
1297 +
1298 +#if !defined( VC_CMA_H )
1299 +#define VC_CMA_H
1300 +
1301 +#include <linux/ioctl.h>
1302 +
1303 +#define VC_CMA_IOC_MAGIC 0xc5
1304 +
1305 +#define VC_CMA_IOC_RESERVE _IO(VC_CMA_IOC_MAGIC, 0)
1306 +
1307 +#ifdef __KERNEL__
1308 +
1309 +#ifdef CONFIG_BCM_VC_CMA
1310 +void vc_cma_early_init(void);
1311 +void vc_cma_reserve(void);
1312 +#else
1313 +static inline void vc_cma_early_init(void) { }
1314 +static inline void vc_cma_reserve(void) { }
1315 +#endif
1316 +
1317 +#endif
1318 +
1319 +#endif /* VC_CMA_H */