// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator generic heap helpers
 *
 * Copyright (C) 2011 Google, Inc.
 */
9 #include <linux/freezer.h>
10 #include <linux/kthread.h>
12 #include <linux/rtmutex.h>
13 #include <linux/sched.h>
14 #include <uapi/linux/sched/types.h>
15 #include <linux/scatterlist.h>
16 #include <linux/vmalloc.h>
20 void *ion_heap_map_kernel(struct ion_heap *heap,
21 struct ion_buffer *buffer)
23 struct scatterlist *sg;
27 struct sg_table *table = buffer->sg_table;
28 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
29 struct page **pages = vmalloc(array_size(npages,
30 sizeof(struct page *)));
31 struct page **tmp = pages;
34 return ERR_PTR(-ENOMEM);
36 if (buffer->flags & ION_FLAG_CACHED)
39 pgprot = pgprot_writecombine(PAGE_KERNEL);
41 for_each_sg(table->sgl, sg, table->nents, i) {
42 int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
43 struct page *page = sg_page(sg);
46 for (j = 0; j < npages_this_entry; j++)
49 vaddr = vmap(pages, npages, VM_MAP, pgprot);
53 return ERR_PTR(-ENOMEM);
58 void ion_heap_unmap_kernel(struct ion_heap *heap,
59 struct ion_buffer *buffer)
61 vunmap(buffer->vaddr);
64 int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
65 struct vm_area_struct *vma)
67 struct sg_table *table = buffer->sg_table;
68 unsigned long addr = vma->vm_start;
69 unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
70 struct scatterlist *sg;
74 for_each_sg(table->sgl, sg, table->nents, i) {
75 struct page *page = sg_page(sg);
76 unsigned long remainder = vma->vm_end - addr;
77 unsigned long len = sg->length;
79 if (offset >= sg->length) {
83 page += offset / PAGE_SIZE;
84 len = sg->length - offset;
87 len = min(len, remainder);
88 ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
93 if (addr >= vma->vm_end)
100 static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
102 void *addr = vm_map_ram(pages, num, -1, pgprot);
106 memset(addr, 0, PAGE_SIZE * num);
107 vm_unmap_ram(addr, num);
112 static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
117 struct sg_page_iter piter;
118 struct page *pages[32];
120 for_each_sg_page(sgl, &piter, nents, 0) {
121 pages[p++] = sg_page_iter_page(&piter);
122 if (p == ARRAY_SIZE(pages)) {
123 ret = ion_heap_clear_pages(pages, p, pgprot);
130 ret = ion_heap_clear_pages(pages, p, pgprot);
135 int ion_heap_buffer_zero(struct ion_buffer *buffer)
137 struct sg_table *table = buffer->sg_table;
140 if (buffer->flags & ION_FLAG_CACHED)
141 pgprot = PAGE_KERNEL;
143 pgprot = pgprot_writecombine(PAGE_KERNEL);
145 return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
148 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
150 struct scatterlist sg;
152 sg_init_table(&sg, 1);
153 sg_set_page(&sg, page, size, 0);
154 return ion_heap_sglist_zero(&sg, 1, pgprot);
157 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
159 spin_lock(&heap->free_lock);
160 list_add(&buffer->list, &heap->free_list);
161 heap->free_list_size += buffer->size;
162 spin_unlock(&heap->free_lock);
163 wake_up(&heap->waitqueue);
166 size_t ion_heap_freelist_size(struct ion_heap *heap)
170 spin_lock(&heap->free_lock);
171 size = heap->free_list_size;
172 spin_unlock(&heap->free_lock);
177 static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
180 struct ion_buffer *buffer;
181 size_t total_drained = 0;
183 if (ion_heap_freelist_size(heap) == 0)
186 spin_lock(&heap->free_lock);
188 size = heap->free_list_size;
190 while (!list_empty(&heap->free_list)) {
191 if (total_drained >= size)
193 buffer = list_first_entry(&heap->free_list, struct ion_buffer,
195 list_del(&buffer->list);
196 heap->free_list_size -= buffer->size;
198 buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
199 total_drained += buffer->size;
200 spin_unlock(&heap->free_lock);
201 ion_buffer_destroy(buffer);
202 spin_lock(&heap->free_lock);
204 spin_unlock(&heap->free_lock);
206 return total_drained;
209 size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
211 return _ion_heap_freelist_drain(heap, size, false);
214 size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
216 return _ion_heap_freelist_drain(heap, size, true);
219 static int ion_heap_deferred_free(void *data)
221 struct ion_heap *heap = data;
224 struct ion_buffer *buffer;
226 wait_event_freezable(heap->waitqueue,
227 ion_heap_freelist_size(heap) > 0);
229 spin_lock(&heap->free_lock);
230 if (list_empty(&heap->free_list)) {
231 spin_unlock(&heap->free_lock);
234 buffer = list_first_entry(&heap->free_list, struct ion_buffer,
236 list_del(&buffer->list);
237 heap->free_list_size -= buffer->size;
238 spin_unlock(&heap->free_lock);
239 ion_buffer_destroy(buffer);
245 int ion_heap_init_deferred_free(struct ion_heap *heap)
247 struct sched_param param = { .sched_priority = 0 };
249 INIT_LIST_HEAD(&heap->free_list);
250 init_waitqueue_head(&heap->waitqueue);
251 heap->task = kthread_run(ion_heap_deferred_free, heap,
253 if (IS_ERR(heap->task)) {
254 pr_err("%s: creating thread for deferred free failed\n",
256 return PTR_ERR_OR_ZERO(heap->task);
258 sched_setscheduler(heap->task, SCHED_IDLE, ¶m);
263 static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
264 struct shrink_control *sc)
266 struct ion_heap *heap = container_of(shrinker, struct ion_heap,
270 total = ion_heap_freelist_size(heap) / PAGE_SIZE;
272 if (heap->ops->shrink)
273 total += heap->ops->shrink(heap, sc->gfp_mask, 0);
278 static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
279 struct shrink_control *sc)
281 struct ion_heap *heap = container_of(shrinker, struct ion_heap,
284 int to_scan = sc->nr_to_scan;
290 * shrink the free list first, no point in zeroing the memory if we're
291 * just going to reclaim it. Also, skip any possible page pooling.
293 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
294 freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
301 if (heap->ops->shrink)
302 freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
307 int ion_heap_init_shrinker(struct ion_heap *heap)
309 heap->shrinker.count_objects = ion_heap_shrink_count;
310 heap->shrinker.scan_objects = ion_heap_shrink_scan;
311 heap->shrinker.seeks = DEFAULT_SEEKS;
312 heap->shrinker.batch = 0;
314 return register_shrinker(&heap->shrinker);