// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation
 * Copyright 2018 Google LLC.
 *
 * Author: Tomasz Figa <tfiga@chromium.org>
 * Author: Yong Zhi <yong.zhi@intel.com>
 */

#include <linux/vmalloc.h>

#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"

/*
 * Free a buffer allocated by imgu_dmamap_alloc_buffer()
 */
static void imgu_dmamap_free_buffer(struct page **pages, size_t size)
{
	int count = size >> PAGE_SHIFT;

	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

/*
 * Based on the implementation of __iommu_dma_alloc_pages()
 * defined in drivers/iommu/dma-iommu.c
 */
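
/*
 * Allocates an array of individual (order-0) struct page pointers
 * backing @size bytes. The order loop below is inherited from the
 * IOMMU helper and can split high-order allocations, but with
 * order_mask fixed to 1 it only ever requests order-0 pages here.
 */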
static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, count = size >> PAGE_SHIFT;
	unsigned int order_mask = 1;
	const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;

	/* Allocate mem for array of page ptrs */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	gfp |= __GFP_HIGHMEM | __GFP_ZERO;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/* Try the allowed orders, largest first */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | high_order_gfp : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				/* Split so each array slot holds one page */
				split_page(page, order);
				break;
			}

			__free_pages(page, order);
		}
		if (!page) {
			imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}

	return pages;
}

/**
 * imgu_dmamap_alloc - allocate and map a buffer into KVA
 * @imgu: struct imgu_device pointer
 * @map: struct to store mapping variables
 * @len: size required
 *
 * Returns:
 *  KVA on success
 *  %NULL on failure
 */
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
			size_t len)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct device *dev = &imgu->pci_dev->dev;
	size_t size = PAGE_ALIGN(len);
	struct page **pages;
	dma_addr_t iovaddr;
	struct iova *iova;
	int i, rval;

	dev_dbg(dev, "%s: allocating %zu\n", __func__, size);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return NULL;
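
	/* Allocate physical pages to back the buffer */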
	pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
	if (!pages)
		goto out_free_iova;

	/* Call IOMMU driver to setup pgt */
	iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
	for (i = 0; i < size / PAGE_SIZE; ++i) {
		rval = imgu_mmu_map(imgu->mmu, iovaddr,
				    page_to_phys(pages[i]), PAGE_SIZE);
		if (rval)
			goto out_unmap;

		iovaddr += PAGE_SIZE;
	}

	/* Now grab a virtual region */
	map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END);
	if (!map->vma)
		goto out_unmap;

	map->vma->pages = pages;
	/* And map it in KVA */
	if (map_vm_area(map->vma, PAGE_KERNEL, pages))
		goto out_vunmap;

	map->size = size;
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
	map->vaddr = map->vma->addr;

	dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
		size, &map->daddr, map->vma->addr);

	return map->vma->addr;

out_vunmap:
	vunmap(map->vma->addr);

out_unmap:
	imgu_dmamap_free_buffer(pages, size);
	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       i * PAGE_SIZE);
	map->vma = NULL;

out_free_iova:
	__free_iova(&imgu->iova_domain, iova);

	return NULL;
}
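
/*
 * A minimal usage sketch (illustrative only, not code from this
 * driver; "len" and the surrounding error handling are assumptions):
 *
 *	struct imgu_css_map map = {};
 *	void *vaddr = imgu_dmamap_alloc(imgu, &map, len);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *
 * CPU access then goes through map.vaddr, device access through
 * map.daddr, and imgu_dmamap_free(imgu, &map) releases everything.
 */

/*
 * imgu_dmamap_unmap - release the IOMMU mapping of @map
 *
 * Tears down the page-table entries and frees the IOVA region only;
 * the backing pages and the kernel virtual mapping are released by
 * imgu_dmamap_free().
 */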
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
	struct iova *iova;

	iova = find_iova(&imgu->iova_domain,
			 iova_pfn(&imgu->iova_domain, map->daddr));
	if (WARN_ON(!iova))
		return;

	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       iova_size(iova) << iova_shift(&imgu->iova_domain));

	__free_iova(&imgu->iova_domain, iova);
}

/*
 * Counterpart of imgu_dmamap_alloc
 */
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
	struct vm_struct *area = map->vma;

	dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
		__func__, map->size, &map->daddr, map->vaddr);

	if (!map->vaddr)
		return;

	imgu_dmamap_unmap(imgu, map);

	if (WARN_ON(!area) || WARN_ON(!area->pages))
		return;

	imgu_dmamap_free_buffer(area->pages, map->size);
	vunmap(map->vaddr);
	map->vaddr = NULL;
}
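
/*
 * Map a scatter-gather list into the IPU3 IOVA space as one contiguous
 * device-address range. Every entry must start at a page boundary, and
 * every entry except the last must have a page-aligned length. On
 * success only map->daddr and map->size are set; no kernel virtual
 * mapping is created.
 */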
int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
		       int nents, struct imgu_css_map *map)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct scatterlist *sg;
	struct iova *iova;
	size_t size = 0;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset)
			return -EINVAL;

		if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
			return -EINVAL;

		size += sg->length;
	}

	size = iova_align(&imgu->iova_domain, size);
	dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
		nents, size >> shift);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return -ENOMEM;

	dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);

	if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
			    sglist, nents) < size)
		goto out_fail;

	memset(map, 0, sizeof(*map));
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
	map->size = size;

	return 0;

out_fail:
	__free_iova(&imgu->iova_domain, iova);

	return -EFAULT;
}
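
/*
 * Set up the IOVA domain from which device addresses are allocated.
 * The granule is IPU3_PAGE_SIZE and the first usable page frame is
 * derived from the MMU aperture start.
 */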
int imgu_dmamap_init(struct imgu_device *imgu)
{
	unsigned long order, base_pfn;
	int ret = iova_cache_get();

	if (ret)
		return ret;

	order = __ffs(IPU3_PAGE_SIZE);
	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);

	return 0;
}
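
/*
 * Counterpart of imgu_dmamap_init()
 */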
void imgu_dmamap_exit(struct imgu_device *imgu)
{
	put_iova_domain(&imgu->iova_domain);
	iova_cache_put();
}