drivers/staging/media/ipu3/ipu3-dmamap.c (Linux-libre 5.3.12-gnu)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation
 * Copyright 2018 Google LLC.
 *
 * Author: Tomasz Figa <tfiga@chromium.org>
 * Author: Yong Zhi <yong.zhi@intel.com>
 */

#include <linux/vmalloc.h>

#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"

/*
 * Free a buffer allocated by imgu_dmamap_alloc_buffer()
 */
static void imgu_dmamap_free_buffer(struct page **pages,
                                    size_t size)
{
        int count = size >> PAGE_SHIFT;

        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

/*
 * Based on the implementation of __iommu_dma_alloc_pages()
 * defined in drivers/iommu/dma-iommu.c
 */
static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, count = size >> PAGE_SHIFT;
        unsigned int order_mask = 1;
        const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;

        /* Allocate mem for array of page ptrs */
        pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);

        if (!pages)
                return NULL;

        gfp |= __GFP_HIGHMEM | __GFP_ZERO;

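        /*
         * Populate the buffer page by page.  The loop below is borrowed
         * from __iommu_dma_alloc_pages() and could try higher-order
         * allocations first (with __GFP_NOWARN | __GFP_NORETRY so failed
         * attempts stay cheap), but with order_mask fixed at 1 only
         * order-0 pages are ever requested, so the buffer does not need
         * to be physically contiguous.
         */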
        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);

                        order_size = 1U << order;
                        page = alloc_pages((order_mask - order_size) ?
                                           gfp | high_order_gfp : gfp, order);
                        if (!page)
                                continue;
                        if (!order)
                                break;
                        if (!PageCompound(page)) {
                                split_page(page, order);
                                break;
                        }

                        __free_pages(page, order);
                }
                if (!page) {
                        imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }

        return pages;
}

/**
 * imgu_dmamap_alloc - allocate and map a buffer into KVA
 * @imgu: imgu device
 * @map: struct to store mapping variables
 * @len: size required, in bytes
 *
 * Returns:
 *  KVA on success
 *  %NULL on failure
 */
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
                        size_t len)
{
        unsigned long shift = iova_shift(&imgu->iova_domain);
        struct device *dev = &imgu->pci_dev->dev;
        size_t size = PAGE_ALIGN(len);
        struct page **pages;
        dma_addr_t iovaddr;
        struct iova *iova;
        int i, rval;

        dev_dbg(dev, "%s: allocating %zu\n", __func__, size);

        iova = alloc_iova(&imgu->iova_domain, size >> shift,
                          imgu->mmu->aperture_end >> shift, 0);
        if (!iova)
                return NULL;

        pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
        if (!pages)
                goto out_free_iova;

        /* Call the IOMMU driver to set up the page table */
        iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
        for (i = 0; i < size / PAGE_SIZE; ++i) {
                rval = imgu_mmu_map(imgu->mmu, iovaddr,
                                    page_to_phys(pages[i]), PAGE_SIZE);
                if (rval)
                        goto out_unmap;

                iovaddr += PAGE_SIZE;
        }

        /* Now grab a virtual region */
        map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END);
        if (!map->vma)
                goto out_unmap;

        map->vma->pages = pages;
        /* And map it in KVA */
        if (map_vm_area(map->vma, PAGE_KERNEL, pages))
                goto out_vunmap;

        map->size = size;
        map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
        map->vaddr = map->vma->addr;

        dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
                size, &map->daddr, map->vma->addr);

        return map->vma->addr;

out_vunmap:
        vunmap(map->vma->addr);

out_unmap:
        /* Tear down whatever was mapped so far before freeing the pages */
        imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
                       i * PAGE_SIZE);
        imgu_dmamap_free_buffer(pages, size);
        map->vma = NULL;

out_free_iova:
        __free_iova(&imgu->iova_domain, iova);

        return NULL;
}

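/**
 * imgu_dmamap_unmap - remove a mapping set up by imgu_dmamap_alloc() or
 *                     imgu_dmamap_map_sg()
 * @imgu: imgu device
 * @map: the mapping to remove
 *
 * Unmaps the range behind @map->daddr from the IPU3 MMU and releases its
 * IOVA.  The backing pages and any kernel mapping are left alone; for
 * buffers from imgu_dmamap_alloc() they are released by imgu_dmamap_free().
 */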
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
        struct iova *iova;

        iova = find_iova(&imgu->iova_domain,
                         iova_pfn(&imgu->iova_domain, map->daddr));
        if (WARN_ON(!iova))
                return;

        imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
                       iova_size(iova) << iova_shift(&imgu->iova_domain));

        __free_iova(&imgu->iova_domain, iova);
}

/*
 * Counterpart of imgu_dmamap_alloc(): unmap the buffer from the IOMMU,
 * free the backing pages and remove the kernel mapping.
 */
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
        struct vm_struct *area = map->vma;

        dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
                __func__, map->size, &map->daddr, map->vaddr);

        if (!map->vaddr)
                return;

        imgu_dmamap_unmap(imgu, map);

        if (WARN_ON(!area) || WARN_ON(!area->pages))
                return;

        imgu_dmamap_free_buffer(area->pages, map->size);
        vunmap(map->vaddr);
        map->vaddr = NULL;
}

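/**
 * imgu_dmamap_map_sg - map a scatterlist into a contiguous IOVA range
 * @imgu: imgu device
 * @sglist: scatterlist to map
 * @nents: number of entries in @sglist
 * @map: struct to store the resulting mapping
 *
 * Maps the entries of @sglist back to back into a single IOVA range and
 * stores its device address and size in @map.  No kernel virtual mapping
 * is created.
 *
 * Returns 0 on success or a negative error code on failure.
 */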
int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
                       int nents, struct imgu_css_map *map)
{
        unsigned long shift = iova_shift(&imgu->iova_domain);
        struct scatterlist *sg;
        struct iova *iova;
        size_t size = 0;
        int i;

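        /*
         * The entries are mapped back to back, so reject lists whose
         * layout would leave gaps in the IOVA range: every entry must
         * start at offset 0 and all but the last must have a
         * page-aligned length.
         */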
        for_each_sg(sglist, sg, nents, i) {
                if (sg->offset)
                        return -EINVAL;

                if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
                        return -EINVAL;

                size += sg->length;
        }

        size = iova_align(&imgu->iova_domain, size);
        dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
                nents, size >> shift);

        iova = alloc_iova(&imgu->iova_domain, size >> shift,
                          imgu->mmu->aperture_end >> shift, 0);
        if (!iova)
                return -ENOMEM;

        dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
                iova->pfn_lo, iova->pfn_hi);

        if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
                            sglist, nents) < size)
                goto out_fail;

        memset(map, 0, sizeof(*map));
        map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
        map->size = size;

        return 0;

out_fail:
        __free_iova(&imgu->iova_domain, iova);

        return -EFAULT;
}

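/*
 * Set up the IOVA domain used to hand out device addresses for the IPU3
 * MMU, with IPU3_PAGE_SIZE granularity and allocations starting at the
 * bottom of the MMU aperture.
 */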
int imgu_dmamap_init(struct imgu_device *imgu)
{
        unsigned long order, base_pfn;
        int ret = iova_cache_get();

        if (ret)
                return ret;

        order = __ffs(IPU3_PAGE_SIZE);
        base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
        init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);

        return 0;
}

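/*
 * Counterpart of imgu_dmamap_init(): destroy the IOVA domain and drop the
 * reference on the global IOVA cache.
 */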
void imgu_dmamap_exit(struct imgu_device *imgu)
{
        put_iova_domain(&imgu->iova_domain);
        iova_cache_put();
}