Linux-libre 4.15.7-gnu
drivers/staging/android/ion/ion_carveout_heap.c
/*
 * drivers/staging/android/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"

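/*
 * A carveout heap serves allocations from a single, statically reserved
 * chunk of physically contiguous memory handed to the heap at creation
 * time.  The region is managed with a gen_pool; since gen_pool_alloc()
 * returns 0 on failure, -1 is used as this heap's own allocation-failure
 * marker.
 */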
#define ION_CARVEOUT_ALLOCATE_FAIL	-1

struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	phys_addr_t base;
};

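/*
 * Allocate @size bytes of physically contiguous memory from the heap's
 * gen_pool.  Returns the physical address of the allocation, or
 * ION_CARVEOUT_ALLOCATE_FAIL when the pool is exhausted.
 */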
static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
					 unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}

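/*
 * Return a range previously handed out by ion_carveout_allocate() to
 * the gen_pool; the ION_CARVEOUT_ALLOCATE_FAIL marker is silently
 * ignored.
 */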
static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr,
			      unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
}

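/*
 * Heap op: back an ion_buffer with one physically contiguous carveout
 * range and describe it with a single-entry sg_table.
 */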
static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size,
				      unsigned long flags)
{
	struct sg_table *table;
	phys_addr_t paddr;
	int ret;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = ion_carveout_allocate(heap, size);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
		ret = -ENOMEM;
		goto err_free_table;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	buffer->sg_table = table;

	return 0;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ret;
}

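/*
 * Heap op: zero the buffer so stale contents never leak to the next
 * client, then return its physical range to the pool and release the
 * sg_table.
 */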
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	ion_heap_buffer_zero(buffer);

	ion_carveout_free(heap, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}

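/*
 * Only allocate/free are heap specific; kernel and userspace mappings
 * use the generic ion_heap helpers.
 */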
static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

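/*
 * Create a carveout heap covering heap_data->size bytes starting at
 * heap_data->base.  The region is zeroed up front and then added to a
 * page-granularity gen_pool.  Returns the embedded ion_heap on success
 * or an ERR_PTR() on failure.
 */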
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	int ret;

	struct page *page;
	size_t size;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
		     -1);
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	return &carveout_heap->heap;
}
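
/*
 * A minimal usage sketch (not part of the original file): a platform
 * driver would describe its reserved region in an ion_platform_heap and
 * register the heap returned above with ion_device_add_heap().  The
 * name, base and size below are hypothetical placeholders, not values
 * from any real board file.
 */
#if 0	/* illustration only */
static int __init example_carveout_register(void)
{
	struct ion_platform_heap heap_data = {
		.type = ION_HEAP_TYPE_CARVEOUT,
		.name = "carveout",		/* hypothetical name */
		.base = 0x80000000,		/* hypothetical reserved base */
		.size = 16 * 1024 * 1024,	/* hypothetical 16 MiB region */
	};
	struct ion_heap *heap;

	heap = ion_carveout_heap_create(&heap_data);
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	ion_device_add_heap(heap);
	return 0;
}
#endif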