brcm2708: update linux 4.4 patches to latest version
target/linux/brcm2708/patches-4.4/0090-drm-vc4-Add-suport-for-3D-rendering-using-the-V3D-en.patch
1 From 5009df0a7714100a74d455893485ea9a8dd8a48d Mon Sep 17 00:00:00 2001
2 From: Eric Anholt <eric@anholt.net>
3 Date: Mon, 2 Mar 2015 13:01:12 -0800
4 Subject: [PATCH] drm/vc4: Add support for 3D rendering using the V3D engine.
5
6 This is a squash of the out-of-tree development series.  Since that
7 series contained code from the first "get a demo triangle rendered
8 using a hacked up driver using binary shader code" to "plug the last
9 known security hole", it's hard to reconstruct a different series of
10 incremental development that's mergeable without security holes
11 throughout it.
12
13 Signed-off-by: Eric Anholt <eric@anholt.net>
14 ---
15  drivers/gpu/drm/vc4/Makefile               |  11 +-
16  drivers/gpu/drm/vc4/vc4_bo.c               | 476 +++++++++++++-
17  drivers/gpu/drm/vc4/vc4_crtc.c             |  98 ++-
18  drivers/gpu/drm/vc4/vc4_debugfs.c          |   3 +
19  drivers/gpu/drm/vc4/vc4_drv.c              |  45 +-
20  drivers/gpu/drm/vc4/vc4_drv.h              | 317 ++++++++++
21  drivers/gpu/drm/vc4/vc4_gem.c              | 686 +++++++++++++++++++++
22  drivers/gpu/drm/vc4/vc4_irq.c              | 211 +++++++
23  drivers/gpu/drm/vc4/vc4_kms.c              | 148 ++++-
24  drivers/gpu/drm/vc4/vc4_packet.h           | 384 ++++++++++++
25  drivers/gpu/drm/vc4/vc4_plane.c            |  40 ++
26  drivers/gpu/drm/vc4/vc4_qpu_defines.h      | 268 ++++++++
27  drivers/gpu/drm/vc4/vc4_render_cl.c        | 448 ++++++++++++++
28  drivers/gpu/drm/vc4/vc4_trace.h            |  63 ++
29  drivers/gpu/drm/vc4/vc4_trace_points.c     |  14 +
30  drivers/gpu/drm/vc4/vc4_v3d.c              | 268 ++++++++
31  drivers/gpu/drm/vc4/vc4_validate.c         | 958 +++++++++++++++++++++++++++++
32  drivers/gpu/drm/vc4/vc4_validate_shaders.c | 521 ++++++++++++++++
33  include/uapi/drm/vc4_drm.h                 | 229 +++++++
34  19 files changed, 5173 insertions(+), 15 deletions(-)
35  create mode 100644 drivers/gpu/drm/vc4/vc4_gem.c
36  create mode 100644 drivers/gpu/drm/vc4/vc4_irq.c
37  create mode 100644 drivers/gpu/drm/vc4/vc4_packet.h
38  create mode 100644 drivers/gpu/drm/vc4/vc4_qpu_defines.h
39  create mode 100644 drivers/gpu/drm/vc4/vc4_render_cl.c
40  create mode 100644 drivers/gpu/drm/vc4/vc4_trace.h
41  create mode 100644 drivers/gpu/drm/vc4/vc4_trace_points.c
42  create mode 100644 drivers/gpu/drm/vc4/vc4_v3d.c
43  create mode 100644 drivers/gpu/drm/vc4/vc4_validate.c
44  create mode 100644 drivers/gpu/drm/vc4/vc4_validate_shaders.c
45  create mode 100644 include/uapi/drm/vc4_drm.h
46
47 --- a/drivers/gpu/drm/vc4/Makefile
48 +++ b/drivers/gpu/drm/vc4/Makefile
49 @@ -8,10 +8,19 @@ vc4-y := \
50         vc4_crtc.o \
51         vc4_drv.o \
52         vc4_kms.o \
53 +       vc4_gem.o \
54         vc4_hdmi.o \
55         vc4_hvs.o \
56 -       vc4_plane.o
57 +       vc4_irq.o \
58 +       vc4_plane.o \
59 +       vc4_render_cl.o \
60 +       vc4_trace_points.o \
61 +       vc4_v3d.o \
62 +       vc4_validate.o \
63 +       vc4_validate_shaders.o
64  
65  vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
66  
67  obj-$(CONFIG_DRM_VC4)  += vc4.o
68 +
69 +CFLAGS_vc4_trace_points.o := -I$(src)
70 --- a/drivers/gpu/drm/vc4/vc4_bo.c
71 +++ b/drivers/gpu/drm/vc4/vc4_bo.c
72 @@ -15,16 +15,174 @@
73   */
74  
75  #include "vc4_drv.h"
76 +#include "uapi/drm/vc4_drm.h"
77  
78 -struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size)
79 +static void vc4_bo_stats_dump(struct vc4_dev *vc4)
80  {
81 +       DRM_INFO("num bos allocated: %d\n",
82 +                vc4->bo_stats.num_allocated);
83 +       DRM_INFO("size bos allocated: %dkb\n",
84 +                vc4->bo_stats.size_allocated / 1024);
85 +       DRM_INFO("num bos used: %d\n",
86 +                vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
87 +       DRM_INFO("size bos used: %dkb\n",
88 +                (vc4->bo_stats.size_allocated -
89 +                 vc4->bo_stats.size_cached) / 1024);
90 +       DRM_INFO("num bos cached: %d\n",
91 +                vc4->bo_stats.num_cached);
92 +       DRM_INFO("size bos cached: %dkb\n",
93 +                vc4->bo_stats.size_cached / 1024);
94 +}
95 +
96 +static uint32_t bo_page_index(size_t size)
97 +{
98 +       return (size / PAGE_SIZE) - 1;
99 +}
100 +
101 +/* Must be called with bo_lock held. */
102 +static void vc4_bo_destroy(struct vc4_bo *bo)
103 +{
104 +       struct drm_gem_object *obj = &bo->base.base;
105 +       struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
106 +
107 +       if (bo->validated_shader) {
108 +               kfree(bo->validated_shader->texture_samples);
109 +               kfree(bo->validated_shader);
110 +               bo->validated_shader = NULL;
111 +       }
112 +
113 +       vc4->bo_stats.num_allocated--;
114 +       vc4->bo_stats.size_allocated -= obj->size;
115 +       drm_gem_cma_free_object(obj);
116 +}
117 +
118 +/* Must be called with bo_lock held. */
119 +static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
120 +{
121 +       struct drm_gem_object *obj = &bo->base.base;
122 +       struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
123 +
124 +       vc4->bo_stats.num_cached--;
125 +       vc4->bo_stats.size_cached -= obj->size;
126 +
127 +       list_del(&bo->unref_head);
128 +       list_del(&bo->size_head);
129 +}
130 +
131 +static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
132 +                                                    size_t size)
133 +{
134 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
135 +       uint32_t page_index = bo_page_index(size);
136 +
137 +       if (vc4->bo_cache.size_list_size <= page_index) {
138 +               uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
139 +                                       page_index + 1);
140 +               struct list_head *new_list;
141 +               uint32_t i;
142 +
143 +               new_list = kmalloc(new_size * sizeof(struct list_head),
144 +                                  GFP_KERNEL);
145 +               if (!new_list)
146 +                       return NULL;
147 +
148 +               /* Rebase the old cached BO lists to their new list
149 +                * head locations.
150 +                */
151 +               for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
152 +                       struct list_head *old_list = &vc4->bo_cache.size_list[i];
153 +                       if (list_empty(old_list))
154 +                               INIT_LIST_HEAD(&new_list[i]);
155 +                       else
156 +                               list_replace(old_list, &new_list[i]);
157 +               }
158 +               /* And initialize the brand new BO list heads. */
159 +               for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
160 +                       INIT_LIST_HEAD(&new_list[i]);
161 +
162 +               kfree(vc4->bo_cache.size_list);
163 +               vc4->bo_cache.size_list = new_list;
164 +               vc4->bo_cache.size_list_size = new_size;
165 +       }
166 +
167 +       return &vc4->bo_cache.size_list[page_index];
168 +}
169 +
170 +void vc4_bo_cache_purge(struct drm_device *dev)
171 +{
172 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
173 +
174 +       spin_lock(&vc4->bo_lock);
175 +       while (!list_empty(&vc4->bo_cache.time_list)) {
176 +               struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
177 +                                                   struct vc4_bo, unref_head);
178 +               vc4_bo_remove_from_cache(bo);
179 +               vc4_bo_destroy(bo);
180 +       }
181 +       spin_unlock(&vc4->bo_lock);
182 +}
183 +
184 +struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size)
185 +{
186 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
187 +       uint32_t size = roundup(unaligned_size, PAGE_SIZE);
188 +       uint32_t page_index = bo_page_index(size);
189         struct drm_gem_cma_object *cma_obj;
190 +       int pass;
191  
192 -       cma_obj = drm_gem_cma_create(dev, size);
193 -       if (IS_ERR(cma_obj))
194 +       if (size == 0)
195                 return NULL;
196 -       else
197 -               return to_vc4_bo(&cma_obj->base);
198 +
199 +       /* First, try to get a vc4_bo from the kernel BO cache. */
200 +       spin_lock(&vc4->bo_lock);
201 +       if (page_index < vc4->bo_cache.size_list_size &&
202 +           !list_empty(&vc4->bo_cache.size_list[page_index])) {
203 +               struct vc4_bo *bo =
204 +                       list_first_entry(&vc4->bo_cache.size_list[page_index],
205 +                                        struct vc4_bo, size_head);
206 +               vc4_bo_remove_from_cache(bo);
207 +               spin_unlock(&vc4->bo_lock);
208 +               kref_init(&bo->base.base.refcount);
209 +               return bo;
210 +       }
211 +       spin_unlock(&vc4->bo_lock);
212 +
213 +       /* Otherwise, make a new BO. */
214 +       for (pass = 0; ; pass++) {
215 +               cma_obj = drm_gem_cma_create(dev, size);
216 +               if (!IS_ERR(cma_obj))
217 +                       break;
218 +
219 +               switch (pass) {
220 +               case 0:
221 +                       /*
222 +                        * If we've run out of CMA memory, kill the cache of
223 +                        * CMA allocations we've got laying around and try again.
224 +                        */
225 +                       vc4_bo_cache_purge(dev);
226 +                       break;
227 +               case 1:
228 +                       /*
229 +                        * Getting desperate, so try to wait for any
230 +                        * previous rendering to finish, free its
231 +                        * unreferenced BOs to the cache, and then
232 +                        * free the cache.
233 +                        */
234 +                       vc4_wait_for_seqno(dev, vc4->emit_seqno, ~0ull, true);
235 +                       vc4_job_handle_completed(vc4);
236 +                       vc4_bo_cache_purge(dev);
237 +                       break;
238 +               case 3:
239 +                       DRM_ERROR("Failed to allocate from CMA:\n");
240 +                       vc4_bo_stats_dump(vc4);
241 +                       return NULL;
242 +               }
243 +       }
244 +
245 +       vc4->bo_stats.num_allocated++;
246 +       vc4->bo_stats.size_allocated += size;
247 +
248 +       return to_vc4_bo(&cma_obj->base);
249  }
250  
251  int vc4_dumb_create(struct drm_file *file_priv,
252 @@ -41,7 +199,129 @@ int vc4_dumb_create(struct drm_file *fil
253         if (args->size < args->pitch * args->height)
254                 args->size = args->pitch * args->height;
255  
256 -       bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE));
257 +       bo = vc4_bo_create(dev, args->size);
258 +       if (!bo)
259 +               return -ENOMEM;
260 +
261 +       ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
262 +       drm_gem_object_unreference_unlocked(&bo->base.base);
263 +
264 +       return ret;
265 +}
266 +
267 +static void
268 +vc4_bo_cache_free_old(struct drm_device *dev)
269 +{
270 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
271 +       unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
272 +
273 +       spin_lock(&vc4->bo_lock);
274 +       while (!list_empty(&vc4->bo_cache.time_list)) {
275 +               struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
276 +                                                   struct vc4_bo, unref_head);
277 +               if (time_before(expire_time, bo->free_time)) {
278 +                       mod_timer(&vc4->bo_cache.time_timer,
279 +                                 round_jiffies_up(jiffies +
280 +                                                  msecs_to_jiffies(1000)));
281 +                       spin_unlock(&vc4->bo_lock);
282 +                       return;
283 +               }
284 +
285 +               vc4_bo_remove_from_cache(bo);
286 +               vc4_bo_destroy(bo);
287 +       }
288 +       spin_unlock(&vc4->bo_lock);
289 +}
290 +
291 +/* Called on the last userspace/kernel unreference of the BO.  Returns
292 + * it to the BO cache if possible, otherwise frees it.
293 + *
294 + * Note that this is called with the struct_mutex held.
295 + */
296 +void vc4_free_object(struct drm_gem_object *gem_bo)
297 +{
298 +       struct drm_device *dev = gem_bo->dev;
299 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
300 +       struct vc4_bo *bo = to_vc4_bo(gem_bo);
301 +       struct list_head *cache_list;
302 +
303 +       /* If the object references someone else's memory, we can't cache it.
304 +        */
305 +       if (gem_bo->import_attach) {
306 +               vc4_bo_destroy(bo);
307 +               return;
308 +       }
309 +
310 +       /* Don't cache if it was publicly named. */
311 +       if (gem_bo->name) {
312 +               vc4_bo_destroy(bo);
313 +               return;
314 +       }
315 +
316 +       spin_lock(&vc4->bo_lock);
317 +       cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
318 +       if (!cache_list) {
319 +               vc4_bo_destroy(bo);
320 +               spin_unlock(&vc4->bo_lock);
321 +               return;
322 +       }
323 +
324 +       if (bo->validated_shader) {
325 +               kfree(bo->validated_shader->texture_samples);
326 +               kfree(bo->validated_shader);
327 +               bo->validated_shader = NULL;
328 +       }
329 +
330 +       bo->free_time = jiffies;
331 +       list_add(&bo->size_head, cache_list);
332 +       list_add(&bo->unref_head, &vc4->bo_cache.time_list);
333 +
334 +       vc4->bo_stats.num_cached++;
335 +       vc4->bo_stats.size_cached += gem_bo->size;
336 +       spin_unlock(&vc4->bo_lock);
337 +
338 +       vc4_bo_cache_free_old(dev);
339 +}
340 +
341 +static void vc4_bo_cache_time_work(struct work_struct *work)
342 +{
343 +       struct vc4_dev *vc4 =
344 +               container_of(work, struct vc4_dev, bo_cache.time_work);
345 +       struct drm_device *dev = vc4->dev;
346 +
347 +       vc4_bo_cache_free_old(dev);
348 +}
349 +
350 +static void vc4_bo_cache_time_timer(unsigned long data)
351 +{
352 +       struct drm_device *dev = (struct drm_device *)data;
353 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
354 +
355 +       schedule_work(&vc4->bo_cache.time_work);
356 +}
357 +
358 +struct dma_buf *
359 +vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
360 +{
361 +       struct vc4_bo *bo = to_vc4_bo(obj);
362 +
363 +       if (bo->validated_shader) {
364 +               DRM_ERROR("Attempting to export shader BO\n");
365 +               return ERR_PTR(-EINVAL);
366 +       }
367 +
368 +       return drm_gem_prime_export(dev, obj, flags);
369 +}
370 +
371 +int
372 +vc4_create_bo_ioctl(struct drm_device *dev, void *data,
373 +                   struct drm_file *file_priv)
374 +{
375 +       struct drm_vc4_create_bo *args = data;
376 +       struct vc4_bo *bo = NULL;
377 +       int ret;
378 +
379 +       bo = vc4_bo_create(dev, args->size);
380         if (!bo)
381                 return -ENOMEM;
382  
383 @@ -50,3 +330,187 @@ int vc4_dumb_create(struct drm_file *fil
384  
385         return ret;
386  }
387 +
388 +int
389 +vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
390 +                          struct drm_file *file_priv)
391 +{
392 +       struct drm_vc4_create_shader_bo *args = data;
393 +       struct vc4_bo *bo = NULL;
394 +       int ret;
395 +
396 +       if (args->size == 0)
397 +               return -EINVAL;
398 +
399 +       if (args->size % sizeof(u64) != 0)
400 +               return -EINVAL;
401 +
402 +       if (args->flags != 0) {
403 +               DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
404 +               return -EINVAL;
405 +       }
406 +
407 +       if (args->pad != 0) {
408 +               DRM_INFO("Pad set: 0x%08x\n", args->pad);
409 +               return -EINVAL;
410 +       }
411 +
412 +       bo = vc4_bo_create(dev, args->size);
413 +       if (!bo)
414 +               return -ENOMEM;
415 +
416 +       ret = copy_from_user(bo->base.vaddr,
417 +                            (void __user *)(uintptr_t)args->data,
418 +                            args->size);
419 +       if (ret != 0)
420 +               goto fail;
421 +
422 +       bo->validated_shader = vc4_validate_shader(&bo->base);
423 +       if (!bo->validated_shader) {
424 +               ret = -EINVAL;
425 +               goto fail;
426 +       }
427 +
428 +       /* We have to create the handle after validation, to avoid
429 +        * races for users to do doing things like mmap the shader BO.
430 +        */
431 +       ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
432 +
433 + fail:
434 +       drm_gem_object_unreference_unlocked(&bo->base.base);
435 +
436 +       return ret;
437 +}
438 +
439 +int
440 +vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
441 +                 struct drm_file *file_priv)
442 +{
443 +       struct drm_vc4_mmap_bo *args = data;
444 +       struct drm_gem_object *gem_obj;
445 +
446 +       gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
447 +       if (!gem_obj) {
448 +               DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
449 +               return -EINVAL;
450 +       }
451 +
452 +       /* The mmap offset was set up at BO allocation time. */
453 +       args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
454 +
455 +       drm_gem_object_unreference(gem_obj);
456 +       return 0;
457 +}
458 +
459 +int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
460 +{
461 +       struct drm_gem_object *gem_obj;
462 +       struct vc4_bo *bo;
463 +       int ret;
464 +
465 +       ret = drm_gem_mmap(filp, vma);
466 +       if (ret)
467 +               return ret;
468 +
469 +       gem_obj = vma->vm_private_data;
470 +       bo = to_vc4_bo(gem_obj);
471 +
472 +       if (bo->validated_shader) {
473 +               DRM_ERROR("mmaping of shader BOs not allowed.\n");
474 +               return -EINVAL;
475 +       }
476 +
477 +       /*
478 +        * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
479 +        * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
480 +        * the whole buffer.
481 +        */
482 +       vma->vm_flags &= ~VM_PFNMAP;
483 +       vma->vm_pgoff = 0;
484 +
485 +       ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
486 +                                   bo->base.vaddr, bo->base.paddr,
487 +                                   vma->vm_end - vma->vm_start);
488 +       if (ret)
489 +               drm_gem_vm_close(vma);
490 +
491 +       return ret;
492 +}
493 +
494 +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
495 +{
496 +       struct vc4_bo *bo = to_vc4_bo(obj);
497 +
498 +       if (bo->validated_shader) {
499 +               DRM_ERROR("mmaping of shader BOs not allowed.\n");
500 +               return -EINVAL;
501 +       }
502 +
503 +       return drm_gem_cma_prime_mmap(obj, vma);
504 +}
505 +
506 +void *vc4_prime_vmap(struct drm_gem_object *obj)
507 +{
508 +       struct vc4_bo *bo = to_vc4_bo(obj);
509 +
510 +       if (bo->validated_shader) {
511 +               DRM_ERROR("mmaping of shader BOs not allowed.\n");
512 +               return ERR_PTR(-EINVAL);
513 +       }
514 +
515 +       return drm_gem_cma_prime_vmap(obj);
516 +}
517 +
518 +void vc4_bo_cache_init(struct drm_device *dev)
519 +{
520 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
521 +
522 +       spin_lock_init(&vc4->bo_lock);
523 +
524 +       INIT_LIST_HEAD(&vc4->bo_cache.time_list);
525 +
526 +       INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
527 +       setup_timer(&vc4->bo_cache.time_timer,
528 +                   vc4_bo_cache_time_timer,
529 +                   (unsigned long) dev);
530 +}
531 +
532 +void vc4_bo_cache_destroy(struct drm_device *dev)
533 +{
534 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
535 +
536 +       del_timer(&vc4->bo_cache.time_timer);
537 +       cancel_work_sync(&vc4->bo_cache.time_work);
538 +
539 +       vc4_bo_cache_purge(dev);
540 +
541 +       if (vc4->bo_stats.num_allocated) {
542 +               DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
543 +               vc4_bo_stats_dump(vc4);
544 +       }
545 +}
546 +
547 +#ifdef CONFIG_DEBUG_FS
548 +int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
549 +{
550 +       struct drm_info_node *node = (struct drm_info_node *) m->private;
551 +       struct drm_device *dev = node->minor->dev;
552 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
553 +       struct vc4_bo_stats stats;
554 +
555 +       spin_lock(&vc4->bo_lock);
556 +       stats = vc4->bo_stats;
557 +       spin_unlock(&vc4->bo_lock);
558 +
559 +       seq_printf(m, "num bos allocated: %d\n", stats.num_allocated);
560 +       seq_printf(m, "size bos allocated: %dkb\n", stats.size_allocated / 1024);
561 +       seq_printf(m, "num bos used: %d\n", (stats.num_allocated -
562 +                                            stats.num_cached));
563 +       seq_printf(m, "size bos used: %dkb\n", (stats.size_allocated -
564 +                                               stats.size_cached) / 1024);
565 +       seq_printf(m, "num bos cached: %d\n", stats.num_cached);
566 +       seq_printf(m, "size bos cached: %dkb\n", stats.size_cached / 1024);
567 +
568 +       return 0;
569 +}
570 +#endif
571 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
572 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
573 @@ -35,6 +35,7 @@
574  #include "drm_atomic_helper.h"
575  #include "drm_crtc_helper.h"
576  #include "linux/clk.h"
577 +#include "drm_fb_cma_helper.h"
578  #include "linux/component.h"
579  #include "linux/of_device.h"
580  #include "vc4_drv.h"
581 @@ -476,10 +477,105 @@ static irqreturn_t vc4_crtc_irq_handler(
582         return ret;
583  }
584  
585 +struct vc4_async_flip_state {
586 +       struct drm_crtc *crtc;
587 +       struct drm_framebuffer *fb;
588 +       struct drm_pending_vblank_event *event;
589 +
590 +       struct vc4_seqno_cb cb;
591 +};
592 +
593 +/* Called when the V3D execution for the BO being flipped to is done, so that
594 + * we can actually update the plane's address to point to it.
595 + */
596 +static void
597 +vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
598 +{
599 +       struct vc4_async_flip_state *flip_state =
600 +               container_of(cb, struct vc4_async_flip_state, cb);
601 +       struct drm_crtc *crtc = flip_state->crtc;
602 +       struct drm_device *dev = crtc->dev;
603 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
604 +       struct drm_plane *plane = crtc->primary;
605 +
606 +       vc4_plane_async_set_fb(plane, flip_state->fb);
607 +       if (flip_state->event) {
608 +               unsigned long flags;
609 +               spin_lock_irqsave(&dev->event_lock, flags);
610 +               drm_crtc_send_vblank_event(crtc, flip_state->event);
611 +               spin_unlock_irqrestore(&dev->event_lock, flags);
612 +       }
613 +
614 +       drm_framebuffer_unreference(flip_state->fb);
615 +       kfree(flip_state);
616 +
617 +       up(&vc4->async_modeset);
618 +}
619 +
620 +/* Implements async (non-vblank-synced) page flips.
621 + *
622 + * The page flip ioctl needs to return immediately, so we grab the
623 + * modeset semaphore on the pipe, and queue the address update for
624 + * when V3D is done with the BO being flipped to.
625 + */
626 +static int vc4_async_page_flip(struct drm_crtc *crtc,
627 +                              struct drm_framebuffer *fb,
628 +                              struct drm_pending_vblank_event *event,
629 +                              uint32_t flags)
630 +{
631 +       struct drm_device *dev = crtc->dev;
632 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
633 +       struct drm_plane *plane = crtc->primary;
634 +       int ret = 0;
635 +       struct vc4_async_flip_state *flip_state;
636 +       struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
637 +       struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
638 +
639 +       flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
640 +       if (!flip_state)
641 +               return -ENOMEM;
642 +
643 +       drm_framebuffer_reference(fb);
644 +       flip_state->fb = fb;
645 +       flip_state->crtc = crtc;
646 +       flip_state->event = event;
647 +
648 +       /* Make sure all other async modesetes have landed. */
649 +       ret = down_interruptible(&vc4->async_modeset);
650 +       if (ret) {
651 +               kfree(flip_state);
652 +               return ret;
653 +       }
654 +
655 +       /* Immediately update the plane's legacy fb pointer, so that later
656 +        * modeset prep sees the state that will be present when the semaphore
657 +        * is released.
658 +        */
659 +       drm_atomic_set_fb_for_plane(plane->state, fb);
660 +       plane->fb = fb;
661 +
662 +       vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
663 +                          vc4_async_page_flip_complete);
664 +
665 +       /* Driver takes ownership of state on successful async commit. */
666 +       return 0;
667 +}
668 +
669 +static int vc4_page_flip(struct drm_crtc *crtc,
670 +                 struct drm_framebuffer *fb,
671 +                 struct drm_pending_vblank_event *event,
672 +                 uint32_t flags)
673 +{
674 +       if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
675 +               return vc4_async_page_flip(crtc, fb, event, flags);
676 +       else
677 +               return drm_atomic_helper_page_flip(crtc, fb, event, flags);
678 +}
679 +
680  static const struct drm_crtc_funcs vc4_crtc_funcs = {
681         .set_config = drm_atomic_helper_set_config,
682         .destroy = vc4_crtc_destroy,
683 -       .page_flip = drm_atomic_helper_page_flip,
684 +       .page_flip = vc4_page_flip,
685         .set_property = NULL,
686         .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
687         .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
688 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c
689 +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
690 @@ -16,11 +16,14 @@
691  #include "vc4_regs.h"
692  
693  static const struct drm_info_list vc4_debugfs_list[] = {
694 +       {"bo_stats", vc4_bo_stats_debugfs, 0},
695         {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
696         {"hvs_regs", vc4_hvs_debugfs_regs, 0},
697         {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
698         {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
699         {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
700 +       {"v3d_ident", vc4_v3d_debugfs_ident, 0},
701 +       {"v3d_regs", vc4_v3d_debugfs_regs, 0},
702  };
703  
704  #define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
705 --- a/drivers/gpu/drm/vc4/vc4_drv.c
706 +++ b/drivers/gpu/drm/vc4/vc4_drv.c
707 @@ -14,8 +14,10 @@
708  #include <linux/module.h>
709  #include <linux/of_platform.h>
710  #include <linux/platform_device.h>
711 +#include <soc/bcm2835/raspberrypi-firmware.h>
712  #include "drm_fb_cma_helper.h"
713  
714 +#include "uapi/drm/vc4_drm.h"
715  #include "vc4_drv.h"
716  #include "vc4_regs.h"
717  
718 @@ -63,7 +65,7 @@ static const struct file_operations vc4_
719         .open = drm_open,
720         .release = drm_release,
721         .unlocked_ioctl = drm_ioctl,
722 -       .mmap = drm_gem_cma_mmap,
723 +       .mmap = vc4_mmap,
724         .poll = drm_poll,
725         .read = drm_read,
726  #ifdef CONFIG_COMPAT
727 @@ -73,16 +75,28 @@ static const struct file_operations vc4_
728  };
729  
730  static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
731 +       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
732 +       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
733 +       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
734 +       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
735 +       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
736 +       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
737  };
738  
739  static struct drm_driver vc4_drm_driver = {
740         .driver_features = (DRIVER_MODESET |
741                             DRIVER_ATOMIC |
742                             DRIVER_GEM |
743 +                           DRIVER_HAVE_IRQ |
744                             DRIVER_PRIME),
745         .lastclose = vc4_lastclose,
746         .preclose = vc4_drm_preclose,
747  
748 +       .irq_handler = vc4_irq,
749 +       .irq_preinstall = vc4_irq_preinstall,
750 +       .irq_postinstall = vc4_irq_postinstall,
751 +       .irq_uninstall = vc4_irq_uninstall,
752 +
753         .enable_vblank = vc4_enable_vblank,
754         .disable_vblank = vc4_disable_vblank,
755         .get_vblank_counter = drm_vblank_count,
756 @@ -92,18 +106,18 @@ static struct drm_driver vc4_drm_driver
757         .debugfs_cleanup = vc4_debugfs_cleanup,
758  #endif
759  
760 -       .gem_free_object = drm_gem_cma_free_object,
761 +       .gem_free_object = vc4_free_object,
762         .gem_vm_ops = &drm_gem_cma_vm_ops,
763  
764         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
765         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
766         .gem_prime_import = drm_gem_prime_import,
767 -       .gem_prime_export = drm_gem_prime_export,
768 +       .gem_prime_export = vc4_prime_export,
769         .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
770         .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
771 -       .gem_prime_vmap = drm_gem_cma_prime_vmap,
772 +       .gem_prime_vmap = vc4_prime_vmap,
773         .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
774 -       .gem_prime_mmap = drm_gem_cma_prime_mmap,
775 +       .gem_prime_mmap = vc4_prime_mmap,
776  
777         .dumb_create = vc4_dumb_create,
778         .dumb_map_offset = drm_gem_cma_dumb_map_offset,
779 @@ -113,6 +127,8 @@ static struct drm_driver vc4_drm_driver
780         .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
781         .fops = &vc4_drm_fops,
782  
783 +       .gem_obj_size = sizeof(struct vc4_bo),
784 +
785         .name = DRIVER_NAME,
786         .desc = DRIVER_DESC,
787         .date = DRIVER_DATE,
788 @@ -153,6 +169,7 @@ static int vc4_drm_bind(struct device *d
789         struct drm_device *drm;
790         struct drm_connector *connector;
791         struct vc4_dev *vc4;
792 +       struct device_node *firmware_node;
793         int ret = 0;
794  
795         dev->coherent_dma_mask = DMA_BIT_MASK(32);
796 @@ -161,6 +178,14 @@ static int vc4_drm_bind(struct device *d
797         if (!vc4)
798                 return -ENOMEM;
799  
800 +       firmware_node = of_parse_phandle(dev->of_node, "firmware", 0);
801 +       vc4->firmware = rpi_firmware_get(firmware_node);
802 +       if (!vc4->firmware) {
803 +               DRM_DEBUG("Failed to get Raspberry Pi firmware reference.\n");
804 +               return -EPROBE_DEFER;
805 +       }
806 +       of_node_put(firmware_node);
807 +
808         drm = drm_dev_alloc(&vc4_drm_driver, dev);
809         if (!drm)
810                 return -ENOMEM;
811 @@ -170,13 +195,17 @@ static int vc4_drm_bind(struct device *d
812  
813         drm_dev_set_unique(drm, dev_name(dev));
814  
815 +       vc4_bo_cache_init(drm);
816 +
817         drm_mode_config_init(drm);
818         if (ret)
819                 goto unref;
820  
821 +       vc4_gem_init(drm);
822 +
823         ret = component_bind_all(dev, drm);
824         if (ret)
825 -               goto unref;
826 +               goto gem_destroy;
827  
828         ret = drm_dev_register(drm, 0);
829         if (ret < 0)
830 @@ -200,8 +229,11 @@ unregister:
831         drm_dev_unregister(drm);
832  unbind_all:
833         component_unbind_all(dev, drm);
834 +gem_destroy:
835 +       vc4_gem_destroy(drm);
836  unref:
837         drm_dev_unref(drm);
838 +       vc4_bo_cache_destroy(drm);
839         return ret;
840  }
841  
842 @@ -228,6 +260,7 @@ static struct platform_driver *const com
843         &vc4_hdmi_driver,
844         &vc4_crtc_driver,
845         &vc4_hvs_driver,
846 +       &vc4_v3d_driver,
847  };
848  
849  static int vc4_platform_drm_probe(struct platform_device *pdev)
850 --- a/drivers/gpu/drm/vc4/vc4_drv.h
851 +++ b/drivers/gpu/drm/vc4/vc4_drv.h
852 @@ -15,8 +15,85 @@ struct vc4_dev {
853         struct vc4_hdmi *hdmi;
854         struct vc4_hvs *hvs;
855         struct vc4_crtc *crtc[3];
856 +       struct vc4_v3d *v3d;
857  
858         struct drm_fbdev_cma *fbdev;
859 +       struct rpi_firmware *firmware;
860 +
861 +       /* The kernel-space BO cache.  Tracks buffers that have been
862 +        * unreferenced by all other users (refcounts of 0!) but not
863 +        * yet freed, so we can do cheap allocations.
864 +        */
865 +       struct vc4_bo_cache {
866 +               /* Array of list heads for entries in the BO cache,
867 +                * based on number of pages, so we can do O(1) lookups
868 +                * in the cache when allocating.
869 +                */
870 +               struct list_head *size_list;
871 +               uint32_t size_list_size;
872 +
873 +               /* List of all BOs in the cache, ordered by age, so we
874 +                * can do O(1) lookups when trying to free old
875 +                * buffers.
876 +                */
877 +               struct list_head time_list;
878 +               struct work_struct time_work;
879 +               struct timer_list time_timer;
880 +       } bo_cache;
881 +
882 +       struct vc4_bo_stats {
883 +               u32 num_allocated;
884 +               u32 size_allocated;
885 +               u32 num_cached;
886 +               u32 size_cached;
887 +       } bo_stats;
888 +
889 +       /* Protects bo_cache and the BO stats. */
890 +       spinlock_t bo_lock;
891 +
892 +       /* Sequence number for the last job queued in job_list.
893 +        * Starts at 0 (no jobs emitted).
894 +        */
895 +       uint64_t emit_seqno;
896 +
897 +       /* Sequence number for the last completed job on the GPU.
898 +        * Starts at 0 (no jobs completed).
899 +        */
900 +       uint64_t finished_seqno;
901 +
902 +       /* List of all struct vc4_exec_info for jobs to be executed.
903 +        * The first job in the list is the one currently programmed
904 +        * into ct0ca/ct1ca for execution.
905 +        */
906 +       struct list_head job_list;
907 +       /* List of the finished vc4_exec_infos waiting to be freed by
908 +        * job_done_work.
909 +        */
910 +       struct list_head job_done_list;
911 +       spinlock_t job_lock;
912 +       wait_queue_head_t job_wait_queue;
913 +       struct work_struct job_done_work;
914 +
915 +       /* List of struct vc4_seqno_cb for callbacks to be made from a
916 +        * workqueue when the given seqno is passed.
917 +        */
918 +       struct list_head seqno_cb_list;
919 +
920 +       /* The binner overflow memory that's currently set up in
921 +        * BPOA/BPOS registers.  When overflow occurs and a new one is
922 +        * allocated, the previous one will be moved to
923 +        * vc4->current_exec's free list.
924 +        */
925 +       struct vc4_bo *overflow_mem;
926 +       struct work_struct overflow_mem_work;
927 +
928 +       struct {
929 +               uint32_t last_ct0ca, last_ct1ca;
930 +               struct timer_list timer;
931 +               struct work_struct reset_work;
932 +       } hangcheck;
933 +
934 +       struct semaphore async_modeset;
935  };
936  
937  static inline struct vc4_dev *
938 @@ -27,6 +104,25 @@ to_vc4_dev(struct drm_device *dev)
939  
940  struct vc4_bo {
941         struct drm_gem_cma_object base;
942 +
943 +       /* seqno of the last job to render to this BO. */
944 +       uint64_t seqno;
945 +
946 +       /* List entry for the BO's position in either
947 +        * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
948 +        */
949 +       struct list_head unref_head;
950 +
951 +       /* Time in jiffies when the BO was put in vc4->bo_cache. */
952 +       unsigned long free_time;
953 +
954 +       /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
955 +       struct list_head size_head;
956 +
957 +       /* Struct for shader validation state, if created by
958 +        * DRM_IOCTL_VC4_CREATE_SHADER_BO.
959 +        */
960 +       struct vc4_validated_shader_info *validated_shader;
961  };
962  
963  static inline struct vc4_bo *
964 @@ -35,6 +131,17 @@ to_vc4_bo(struct drm_gem_object *bo)
965         return (struct vc4_bo *)bo;
966  }
967  
968 +struct vc4_seqno_cb {
969 +       struct work_struct work;
970 +       uint64_t seqno;
971 +       void (*func)(struct vc4_seqno_cb *cb);
972 +};
973 +
974 +struct vc4_v3d {
975 +       struct platform_device *pdev;
976 +       void __iomem *regs;
977 +};
978 +
979  struct vc4_hvs {
980         struct platform_device *pdev;
981         void __iomem *regs;
982 @@ -72,9 +179,151 @@ to_vc4_encoder(struct drm_encoder *encod
983         return container_of(encoder, struct vc4_encoder, base);
984  }
985  
986 +#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
987 +#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
988  #define HVS_READ(offset) readl(vc4->hvs->regs + offset)
989  #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
990  
991 +enum vc4_bo_mode {
992 +       VC4_MODE_UNDECIDED,
993 +       VC4_MODE_RENDER,
994 +       VC4_MODE_SHADER,
995 +};
996 +
997 +struct vc4_bo_exec_state {
998 +       struct drm_gem_cma_object *bo;
999 +       enum vc4_bo_mode mode;
1000 +};
1001 +
1002 +struct vc4_exec_info {
1003 +       /* Sequence number for this bin/render job. */
1004 +       uint64_t seqno;
1005 +
1006 +       /* Kernel-space copy of the ioctl arguments */
1007 +       struct drm_vc4_submit_cl *args;
1008 +
1009 +       /* This is the array of BOs that were looked up at the start of exec.
1010 +        * Command validation will use indices into this array.
1011 +        */
1012 +       struct vc4_bo_exec_state *bo;
1013 +       uint32_t bo_count;
1014 +
1015 +       /* Pointers for our position in vc4->job_list */
1016 +       struct list_head head;
1017 +
1018 +       /* List of other BOs used in the job that need to be released
1019 +        * once the job is complete.
1020 +        */
1021 +       struct list_head unref_list;
1022 +
1023 +       /* Current unvalidated indices into @bo loaded by the non-hardware
1024 +        * VC4_PACKET_GEM_HANDLES.
1025 +        */
1026 +       uint32_t bo_index[2];
1027 +
1028 +       /* This is the BO where we store the validated command lists, shader
1029 +        * records, and uniforms.
1030 +        */
1031 +       struct drm_gem_cma_object *exec_bo;
1032 +
1033 +       /**
1034 +        * This tracks the per-shader-record state (packet 64) that
1035 +        * determines the length of the shader record and the offset
1036 +        * it's expected to be found at.  It gets read in from the
1037 +        * command lists.
1038 +        */
1039 +       struct vc4_shader_state {
1040 +               uint8_t packet;
1041 +               uint32_t addr;
1042 +               /* Maximum vertex index referenced by any primitive using this
1043 +                * shader state.
1044 +                */
1045 +               uint32_t max_index;
1046 +       } *shader_state;
1047 +
1048 +       /** How many shader states the user declared they were using. */
1049 +       uint32_t shader_state_size;
1050 +       /** How many shader state records the validator has seen. */
1051 +       uint32_t shader_state_count;
1052 +
1053 +       bool found_tile_binning_mode_config_packet;
1054 +       bool found_start_tile_binning_packet;
1055 +       bool found_increment_semaphore_packet;
1056 +       uint8_t bin_tiles_x, bin_tiles_y;
1057 +       struct drm_gem_cma_object *tile_bo;
1058 +       uint32_t tile_alloc_offset;
1059 +
1060 +       /**
1061 +        * Computed addresses pointing into exec_bo where we start the
1062 +        * bin thread (ct0) and render thread (ct1).
1063 +        */
1064 +       uint32_t ct0ca, ct0ea;
1065 +       uint32_t ct1ca, ct1ea;
1066 +
1067 +       /* Pointers to the shader recs.  These paddr gets incremented as CL
1068 +        * packets are relocated in validate_gl_shader_state, and the vaddrs
1069 +        * (u and v) get incremented and size decremented as the shader recs
1070 +        * themselves are validated.
1071 +        */
1072 +       void *shader_rec_u;
1073 +       void *shader_rec_v;
1074 +       uint32_t shader_rec_p;
1075 +       uint32_t shader_rec_size;
1076 +
1077 +       /* Pointers to the uniform data.  These pointers are incremented, and
1078 +        * size decremented, as each batch of uniforms is uploaded.
1079 +        */
1080 +       void *uniforms_u;
1081 +       void *uniforms_v;
1082 +       uint32_t uniforms_p;
1083 +       uint32_t uniforms_size;
1084 +};
1085 +
1086 +static inline struct vc4_exec_info *
1087 +vc4_first_job(struct vc4_dev *vc4)
1088 +{
1089 +       if (list_empty(&vc4->job_list))
1090 +               return NULL;
1091 +       return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
1092 +}
1093 +
1094 +/**
1095 + * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
1096 + * setup parameters.
1097 + *
1098 + * This will be used at draw time to relocate the reference to the texture
1099 + * contents in p0, and validate that the offset combined with
1100 + * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
1101 + * Note that the hardware treats unprovided config parameters as 0, so not all
1102 + * of them need to be set up for every texure sample, and we'll store ~0 as
1103 + * the offset to mark the unused ones.
1104 + *
1105 + * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
1106 + * Setup") for definitions of the texture parameters.
1107 + */
1108 +struct vc4_texture_sample_info {
1109 +       bool is_direct;
1110 +       uint32_t p_offset[4];
1111 +};
1112 +
1113 +/**
1114 + * struct vc4_validated_shader_info - information about validated shaders that
1115 + * needs to be used from command list validation.
1116 + *
1117 + * For a given shader, each time a shader state record references it, we need
1118 + * to verify that the shader doesn't read more uniforms than the shader state
1119 + * record's uniform BO pointer can provide, and we need to apply relocations
1120 + * and validate the shader state record's uniforms that define the texture
1121 + * samples.
1122 + */
1123 +struct vc4_validated_shader_info
1124 +{
1125 +       uint32_t uniforms_size;
1126 +       uint32_t uniforms_src_size;
1127 +       uint32_t num_texture_samples;
1128 +       struct vc4_texture_sample_info *texture_samples;
1129 +};
1130 +
1131  /**
1132   * _wait_for - magic (register) wait macro
1133   *
1134 @@ -111,6 +360,18 @@ int vc4_dumb_create(struct drm_file *fil
1135                     struct drm_mode_create_dumb *args);
1136  struct dma_buf *vc4_prime_export(struct drm_device *dev,
1137                                  struct drm_gem_object *obj, int flags);
1138 +int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
1139 +                       struct drm_file *file_priv);
1140 +int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
1141 +                              struct drm_file *file_priv);
1142 +int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
1143 +                     struct drm_file *file_priv);
1144 +int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
1145 +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
1146 +void *vc4_prime_vmap(struct drm_gem_object *obj);
1147 +void vc4_bo_cache_init(struct drm_device *dev);
1148 +void vc4_bo_cache_destroy(struct drm_device *dev);
1149 +int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
1150  
1151  /* vc4_crtc.c */
1152  extern struct platform_driver vc4_crtc_driver;
1153 @@ -126,10 +387,34 @@ void vc4_debugfs_cleanup(struct drm_mino
1154  /* vc4_drv.c */
1155  void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
1156  
1157 +/* vc4_gem.c */
1158 +void vc4_gem_init(struct drm_device *dev);
1159 +void vc4_gem_destroy(struct drm_device *dev);
1160 +int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1161 +                       struct drm_file *file_priv);
1162 +int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1163 +                        struct drm_file *file_priv);
1164 +int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1165 +                     struct drm_file *file_priv);
1166 +void vc4_submit_next_job(struct drm_device *dev);
1167 +int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
1168 +                      uint64_t timeout_ns, bool interruptible);
1169 +void vc4_job_handle_completed(struct vc4_dev *vc4);
1170 +int vc4_queue_seqno_cb(struct drm_device *dev,
1171 +                      struct vc4_seqno_cb *cb, uint64_t seqno,
1172 +                      void (*func)(struct vc4_seqno_cb *cb));
1173 +
1174  /* vc4_hdmi.c */
1175  extern struct platform_driver vc4_hdmi_driver;
1176  int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
1177  
1178 +/* vc4_irq.c */
1179 +irqreturn_t vc4_irq(int irq, void *arg);
1180 +void vc4_irq_preinstall(struct drm_device *dev);
1181 +int vc4_irq_postinstall(struct drm_device *dev);
1182 +void vc4_irq_uninstall(struct drm_device *dev);
1183 +void vc4_irq_reset(struct drm_device *dev);
1184 +
1185  /* vc4_hvs.c */
1186  extern struct platform_driver vc4_hvs_driver;
1187  void vc4_hvs_dump_state(struct drm_device *dev);
1188 @@ -143,3 +428,35 @@ struct drm_plane *vc4_plane_init(struct
1189                                  enum drm_plane_type type);
1190  u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
1191  u32 vc4_plane_dlist_size(struct drm_plane_state *state);
1192 +void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb);
1193 +
1194 +/* vc4_v3d.c */
1195 +extern struct platform_driver vc4_v3d_driver;
1196 +int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
1197 +int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
1198 +int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
1199 +
1200 +/* vc4_validate.c */
1201 +int
1202 +vc4_validate_bin_cl(struct drm_device *dev,
1203 +                   void *validated,
1204 +                   void *unvalidated,
1205 +                   struct vc4_exec_info *exec);
1206 +
1207 +int
1208 +vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
1209 +
1210 +struct vc4_validated_shader_info *
1211 +vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
1212 +
1213 +bool vc4_use_bo(struct vc4_exec_info *exec,
1214 +               uint32_t hindex,
1215 +               enum vc4_bo_mode mode,
1216 +               struct drm_gem_cma_object **obj);
1217 +
1218 +int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
1219 +
1220 +bool vc4_check_tex_size(struct vc4_exec_info *exec,
1221 +                       struct drm_gem_cma_object *fbo,
1222 +                       uint32_t offset, uint8_t tiling_format,
1223 +                       uint32_t width, uint32_t height, uint8_t cpp);
1224 --- /dev/null
1225 +++ b/drivers/gpu/drm/vc4/vc4_gem.c
1226 @@ -0,0 +1,686 @@
1227 +/*
1228 + * Copyright © 2014 Broadcom
1229 + *
1230 + * Permission is hereby granted, free of charge, to any person obtaining a
1231 + * copy of this software and associated documentation files (the "Software"),
1232 + * to deal in the Software without restriction, including without limitation
1233 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1234 + * and/or sell copies of the Software, and to permit persons to whom the
1235 + * Software is furnished to do so, subject to the following conditions:
1236 + *
1237 + * The above copyright notice and this permission notice (including the next
1238 + * paragraph) shall be included in all copies or substantial portions of the
1239 + * Software.
1240 + *
1241 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1242 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1243 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1244 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1245 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1246 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1247 + * IN THE SOFTWARE.
1248 + */
1249 +
1250 +#include <linux/module.h>
1251 +#include <linux/platform_device.h>
1252 +#include <linux/device.h>
1253 +#include <linux/io.h>
1254 +
1255 +#include "uapi/drm/vc4_drm.h"
1256 +#include "vc4_drv.h"
1257 +#include "vc4_regs.h"
1258 +#include "vc4_trace.h"
1259 +
1260 +static void
1261 +vc4_queue_hangcheck(struct drm_device *dev)
1262 +{
1263 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1264 +
1265 +       mod_timer(&vc4->hangcheck.timer,
1266 +                 round_jiffies_up(jiffies + msecs_to_jiffies(100)));
1267 +}
1268 +
1269 +static void
1270 +vc4_reset(struct drm_device *dev)
1271 +{
1272 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1273 +
1274 +       DRM_INFO("Resetting GPU.\n");
1275 +       vc4_v3d_set_power(vc4, false);
1276 +       vc4_v3d_set_power(vc4, true);
1277 +
1278 +       vc4_irq_reset(dev);
1279 +
1280 +       /* Rearm the hangcheck -- another job might have been waiting
1281 +        * for our hung one to get kicked off, and vc4_irq_reset()
1282 +        * would have started it.
1283 +        */
1284 +       vc4_queue_hangcheck(dev);
1285 +}
1286 +
1287 +static void
1288 +vc4_reset_work(struct work_struct *work)
1289 +{
1290 +       struct vc4_dev *vc4 =
1291 +               container_of(work, struct vc4_dev, hangcheck.reset_work);
1292 +
1293 +       vc4_reset(vc4->dev);
1294 +}
1295 +
1296 +static void
1297 +vc4_hangcheck_elapsed(unsigned long data)
1298 +{
1299 +       struct drm_device *dev = (struct drm_device *)data;
1300 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1301 +       uint32_t ct0ca, ct1ca;
1302 +
1303 +       /* If idle, we can stop watching for hangs. */
1304 +       if (list_empty(&vc4->job_list))
1305 +               return;
1306 +
1307 +       ct0ca = V3D_READ(V3D_CTNCA(0));
1308 +       ct1ca = V3D_READ(V3D_CTNCA(1));
1309 +
1310 +       /* If we've made any progress in execution, rearm the timer
1311 +        * and wait.
1312 +        */
1313 +       if (ct0ca != vc4->hangcheck.last_ct0ca ||
1314 +           ct1ca != vc4->hangcheck.last_ct1ca) {
1315 +               vc4->hangcheck.last_ct0ca = ct0ca;
1316 +               vc4->hangcheck.last_ct1ca = ct1ca;
1317 +               vc4_queue_hangcheck(dev);
1318 +               return;
1319 +       }
1320 +
1321 +       /* We've gone too long with no progress, reset.  This has to
1322 +        * be done from a work struct, since resetting can sleep and
1323 +        * this timer hook isn't allowed to.
1324 +        */
1325 +       schedule_work(&vc4->hangcheck.reset_work);
1326 +}
1327 +
1328 +static void
1329 +submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
1330 +{
1331 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1332 +
1333 +       /* Stop any existing thread and set state to "stopped at halt" */
1334 +       V3D_WRITE(V3D_CTNCS(thread), V3D_CTRUN);
1335 +       barrier();
1336 +
1337 +       V3D_WRITE(V3D_CTNCA(thread), start);
1338 +       barrier();
1339 +
1340 +       /* Set the end address of the control list.  Writing this
1341 +        * register is what starts the job.
1342 +        */
1343 +       V3D_WRITE(V3D_CTNEA(thread), end);
1344 +       barrier();
1345 +}
1346 +
1347 +int
1348 +vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
1349 +                  bool interruptible)
1350 +{
1351 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1352 +       int ret = 0;
1353 +       unsigned long timeout_expire;
1354 +       DEFINE_WAIT(wait);
1355 +
1356 +       if (vc4->finished_seqno >= seqno)
1357 +               return 0;
1358 +
1359 +       if (timeout_ns == 0)
1360 +               return -ETIME;
1361 +
1362 +       timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
1363 +
1364 +       trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
1365 +       for (;;) {
1366 +               prepare_to_wait(&vc4->job_wait_queue, &wait,
1367 +                               interruptible ? TASK_INTERRUPTIBLE :
1368 +                               TASK_UNINTERRUPTIBLE);
1369 +
1370 +               if (interruptible && signal_pending(current)) {
1371 +                       ret = -ERESTARTSYS;
1372 +                       break;
1373 +               }
1374 +
1375 +               if (vc4->finished_seqno >= seqno)
1376 +                       break;
1377 +
1378 +               if (timeout_ns != ~0ull) {
1379 +                       if (time_after_eq(jiffies, timeout_expire)) {
1380 +                               ret = -ETIME;
1381 +                               break;
1382 +                       }
1383 +                       schedule_timeout(timeout_expire - jiffies);
1384 +               } else {
1385 +                       schedule();
1386 +               }
1387 +       }
1388 +
1389 +       finish_wait(&vc4->job_wait_queue, &wait);
1390 +       trace_vc4_wait_for_seqno_end(dev, seqno);
1391 +
1392 +       if (ret && ret != -ERESTARTSYS) {
1393 +               DRM_ERROR("timeout waiting for render thread idle\n");
1394 +               return ret;
1395 +       }
1396 +
1397 +       return 0;
1398 +}
1399 +
1400 +static void
1401 +vc4_flush_caches(struct drm_device *dev)
1402 +{
1403 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1404 +
1405 +       /* Flush the GPU L2 caches.  These caches sit on top of system
1406 +        * L3 (the 128kb or so shared with the CPU), and are
1407 +        * non-allocating in the L3.
1408 +        */
1409 +       V3D_WRITE(V3D_L2CACTL,
1410 +                 V3D_L2CACTL_L2CCLR);
1411 +
1412 +       V3D_WRITE(V3D_SLCACTL,
1413 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
1414 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
1415 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
1416 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
1417 +}
1418 +
1419 +/* Sets the registers for the next job to be actually be executed in
1420 + * the hardware.
1421 + *
1422 + * The job_lock should be held during this.
1423 + */
1424 +void
1425 +vc4_submit_next_job(struct drm_device *dev)
1426 +{
1427 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1428 +       struct vc4_exec_info *exec = vc4_first_job(vc4);
1429 +
1430 +       if (!exec)
1431 +               return;
1432 +
1433 +       vc4_flush_caches(dev);
1434 +
1435 +       /* Disable the binner's pre-loaded overflow memory address */
1436 +       V3D_WRITE(V3D_BPOA, 0);
1437 +       V3D_WRITE(V3D_BPOS, 0);
1438 +
1439 +       if (exec->ct0ca != exec->ct0ea)
1440 +               submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
1441 +       submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
1442 +}
1443 +
1444 +static void
1445 +vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
1446 +{
1447 +       struct vc4_bo *bo;
1448 +       unsigned i;
1449 +
1450 +       for (i = 0; i < exec->bo_count; i++) {
1451 +               bo = to_vc4_bo(&exec->bo[i].bo->base);
1452 +               bo->seqno = seqno;
1453 +       }
1454 +
1455 +       list_for_each_entry(bo, &exec->unref_list, unref_head) {
1456 +               bo->seqno = seqno;
1457 +       }
1458 +}
1459 +
1460 +/* Queues a struct vc4_exec_info for execution.  If no job is
1461 + * currently executing, then submits it.
1462 + *
1463 + * Unlike most GPUs, our hardware only handles one command list at a
1464 + * time.  To queue multiple jobs at once, we'd need to edit the
1465 + * previous command list to have a jump to the new one at the end, and
1466 + * then bump the end address.  That's a change for a later date,
1467 + * though.
1468 + */
1469 +static void
1470 +vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
1471 +{
1472 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1473 +       uint64_t seqno = ++vc4->emit_seqno;
1474 +       unsigned long irqflags;
1475 +
1476 +       exec->seqno = seqno;
1477 +       vc4_update_bo_seqnos(exec, seqno);
1478 +
1479 +       spin_lock_irqsave(&vc4->job_lock, irqflags);
1480 +       list_add_tail(&exec->head, &vc4->job_list);
1481 +
1482 +       /* If no job was executing, kick ours off.  Otherwise, it'll
1483 +        * get started when the previous job's frame done interrupt
1484 +        * occurs.
1485 +        */
1486 +       if (vc4_first_job(vc4) == exec) {
1487 +               vc4_submit_next_job(dev);
1488 +               vc4_queue_hangcheck(dev);
1489 +       }
1490 +
1491 +       spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1492 +}
1493 +
1494 +/**
1495 + * Looks up a bunch of GEM handles for BOs and stores the array for
1496 + * use in the command validator that actually writes relocated
1497 + * addresses pointing to them.
1498 + */
1499 +static int
1500 +vc4_cl_lookup_bos(struct drm_device *dev,
1501 +                 struct drm_file *file_priv,
1502 +                 struct vc4_exec_info *exec)
1503 +{
1504 +       struct drm_vc4_submit_cl *args = exec->args;
1505 +       uint32_t *handles;
1506 +       int ret = 0;
1507 +       int i;
1508 +
1509 +       exec->bo_count = args->bo_handle_count;
1510 +
1511 +       if (!exec->bo_count) {
1512 +               /* See comment on bo_index for why we have to check
1513 +                * this.
1514 +                */
1515 +               DRM_ERROR("Rendering requires BOs to validate\n");
1516 +               return -EINVAL;
1517 +       }
1518 +
1519 +       exec->bo = kcalloc(exec->bo_count, sizeof(struct vc4_bo_exec_state),
1520 +                          GFP_KERNEL);
1521 +       if (!exec->bo) {
1522 +               DRM_ERROR("Failed to allocate validated BO pointers\n");
1523 +               return -ENOMEM;
1524 +       }
1525 +
1526 +       handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
1527 +       if (!handles) {
1528 +               ret = -ENOMEM;
1529 +               DRM_ERROR("Failed to allocate incoming GEM handles\n");
1530 +               goto fail;
1531 +       }
1531 +
1532 +       if (copy_from_user(handles,
1533 +                          (void __user *)(uintptr_t)args->bo_handles,
1534 +                          exec->bo_count * sizeof(uint32_t))) {
1535 +               ret = -EFAULT;
1536 +               DRM_ERROR("Failed to copy in GEM handles\n");
1537 +               goto fail;
1538 +       }
1539 +
1540 +       spin_lock(&file_priv->table_lock);
1541 +       for (i = 0; i < exec->bo_count; i++) {
1542 +               struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
1543 +                                                    handles[i]);
1544 +               if (!bo) {
1545 +                       DRM_ERROR("Failed to look up GEM BO %d: %d\n",
1546 +                                 i, handles[i]);
1547 +                       ret = -EINVAL;
1548 +                       spin_unlock(&file_priv->table_lock);
1549 +                       goto fail;
1550 +               }
1551 +               drm_gem_object_reference(bo);
1552 +               exec->bo[i].bo = (struct drm_gem_cma_object *)bo;
1553 +       }
1554 +       spin_unlock(&file_priv->table_lock);
1555 +
1556 +fail:
1557 +       kfree(handles);
1558 +       return ret;
1559 +}
1560 +
1561 +static int
1562 +vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
1563 +{
1564 +       struct drm_vc4_submit_cl *args = exec->args;
1565 +       void *temp = NULL;
1566 +       void *bin;
1567 +       int ret = 0;
1568 +       uint32_t bin_offset = 0;
1569 +       uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
1570 +                                            16);
1571 +       uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
1572 +       uint32_t exec_size = uniforms_offset + args->uniforms_size;
1573 +       uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
1574 +                                         args->shader_rec_count);
1575 +       struct vc4_bo *bo;
1576 +
1577 +       if (uniforms_offset < shader_rec_offset ||
1578 +           exec_size < uniforms_offset ||
1579 +           args->shader_rec_count >= (UINT_MAX /
1580 +                                         sizeof(struct vc4_shader_state)) ||
1581 +           temp_size < exec_size) {
1582 +               DRM_ERROR("overflow in exec arguments\n");
1583 +               goto fail;
1584 +       }
1585 +
1586 +       /* Allocate space where we'll store the copied in user command lists
1587 +        * and shader records.
1588 +        *
1589 +        * We don't just copy directly into the BOs because we need to
1590 +        * read the contents back for validation, and I think the
1591 +        * bo->vaddr is uncached access.
1592 +        */
1593 +       temp = kmalloc(temp_size, GFP_KERNEL);
1594 +       if (!temp) {
1595 +               DRM_ERROR("Failed to allocate storage for copying "
1596 +                         "in bin/render CLs.\n");
1597 +               ret = -ENOMEM;
1598 +               goto fail;
1599 +       }
1600 +       bin = temp + bin_offset;
1601 +       exec->shader_rec_u = temp + shader_rec_offset;
1602 +       exec->uniforms_u = temp + uniforms_offset;
1603 +       exec->shader_state = temp + exec_size;
1604 +       exec->shader_state_size = args->shader_rec_count;
1605 +
1606 +       if (copy_from_user(bin,
1607 +                          (void __user *)(uintptr_t)args->bin_cl,
1608 +                          args->bin_cl_size)) {
1609 +               ret = -EFAULT;
1610 +               DRM_ERROR("Failed to copy in bin cl\n");
1611 +               goto fail;
1612 +       }
1613 +
1614 +       if (copy_from_user(exec->shader_rec_u,
1615 +                          (void __user *)(uintptr_t)args->shader_rec,
1616 +                          args->shader_rec_size)) {
1617 +               ret = -EFAULT;
1618 +               DRM_ERROR("Failed to copy in shader recs\n");
1619 +               goto fail;
1620 +       }
1621 +
1622 +       if (copy_from_user(exec->uniforms_u,
1623 +                          (void __user *)(uintptr_t)args->uniforms,
1624 +                          args->uniforms_size)) {
1625 +               ret = -EFAULT;
1626 +               DRM_ERROR("Failed to copy in uniforms cl\n");
1627 +               goto fail;
1628 +       }
1629 +
1630 +       bo = vc4_bo_create(dev, exec_size);
1631 +       if (!bo) {
1632 +               DRM_ERROR("Couldn't allocate BO for binning\n");
1633 +               ret = -ENOMEM;
1634 +               goto fail;
1635 +       }
1636 +       exec->exec_bo = &bo->base;
1637 +
1638 +       list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
1639 +                     &exec->unref_list);
1640 +
1641 +       exec->ct0ca = exec->exec_bo->paddr + bin_offset;
1642 +
1643 +       exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
1644 +       exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
1645 +       exec->shader_rec_size = args->shader_rec_size;
1646 +
1647 +       exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
1648 +       exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
1649 +       exec->uniforms_size = args->uniforms_size;
1650 +
1651 +       ret = vc4_validate_bin_cl(dev,
1652 +                                 exec->exec_bo->vaddr + bin_offset,
1653 +                                 bin,
1654 +                                 exec);
1655 +       if (ret)
1656 +               goto fail;
1657 +
1658 +       ret = vc4_validate_shader_recs(dev, exec);
1659 +
1660 +fail:
1661 +       kfree(temp);
1662 +       return ret;
1663 +}
1664 +
1665 +static void
1666 +vc4_complete_exec(struct vc4_exec_info *exec)
1667 +{
1668 +       unsigned i;
1669 +
1670 +       if (exec->bo) {
1671 +               for (i = 0; i < exec->bo_count; i++)
1672 +                       drm_gem_object_unreference(&exec->bo[i].bo->base);
1673 +               kfree(exec->bo);
1674 +       }
1675 +
1676 +       while (!list_empty(&exec->unref_list)) {
1677 +               struct vc4_bo *bo = list_first_entry(&exec->unref_list,
1678 +                                                    struct vc4_bo, unref_head);
1679 +               list_del(&bo->unref_head);
1680 +               drm_gem_object_unreference(&bo->base.base);
1681 +       }
1682 +
1683 +       kfree(exec);
1684 +}
1685 +
1686 +void
1687 +vc4_job_handle_completed(struct vc4_dev *vc4)
1688 +{
1689 +       unsigned long irqflags;
1690 +       struct vc4_seqno_cb *cb, *cb_temp;
1691 +
1692 +       spin_lock_irqsave(&vc4->job_lock, irqflags);
1693 +       while (!list_empty(&vc4->job_done_list)) {
1694 +               struct vc4_exec_info *exec =
1695 +                       list_first_entry(&vc4->job_done_list,
1696 +                                        struct vc4_exec_info, head);
1697 +               list_del(&exec->head);
1698 +
1699 +               spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1700 +               vc4_complete_exec(exec);
1701 +               spin_lock_irqsave(&vc4->job_lock, irqflags);
1702 +       }
1703 +       spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1704 +
1705 +       list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
1706 +               if (cb->seqno <= vc4->finished_seqno) {
1707 +                       list_del_init(&cb->work.entry);
1708 +                       schedule_work(&cb->work);
1709 +               }
1710 +       }
1711 +}
1712 +
1713 +static void vc4_seqno_cb_work(struct work_struct *work)
1714 +{
1715 +       struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
1716 +       cb->func(cb);
1717 +}
1718 +
1719 +int vc4_queue_seqno_cb(struct drm_device *dev,
1720 +                      struct vc4_seqno_cb *cb, uint64_t seqno,
1721 +                      void (*func)(struct vc4_seqno_cb *cb))
1722 +{
1723 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1724 +       int ret = 0;
1725 +
1726 +       cb->func = func;
1727 +       INIT_WORK(&cb->work, vc4_seqno_cb_work);
1728 +
1729 +       mutex_lock(&dev->struct_mutex);
1730 +       if (seqno > vc4->finished_seqno) {
1731 +               cb->seqno = seqno;
1732 +               list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
1733 +       } else {
1734 +               schedule_work(&cb->work);
1735 +       }
1736 +       mutex_unlock(&dev->struct_mutex);
1737 +
1738 +       return ret;
1739 +}
1740 +
1741 +/* Scheduled when any job has been completed, this walks the list of
1742 + * jobs that have completed, unrefs their BOs, and frees their exec
1743 + * structs.
1744 + */
1745 +static void
1746 +vc4_job_done_work(struct work_struct *work)
1747 +{
1748 +       struct vc4_dev *vc4 =
1749 +               container_of(work, struct vc4_dev, job_done_work);
1750 +       struct drm_device *dev = vc4->dev;
1751 +
1752 +       /* Need the struct lock for drm_gem_object_unreference(). */
1753 +       mutex_lock(&dev->struct_mutex);
1754 +       vc4_job_handle_completed(vc4);
1755 +       mutex_unlock(&dev->struct_mutex);
1756 +}
1757 +
1758 +static int
1759 +vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
1760 +                               uint64_t seqno,
1761 +                               uint64_t *timeout_ns)
1762 +{
1763 +       unsigned long start = jiffies;
1764 +       int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
1765 +
1766 +       if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
1767 +               uint64_t delta = jiffies_to_nsecs(jiffies - start);
1768 +               if (*timeout_ns >= delta)
1769 +                       *timeout_ns -= delta;
1770 +       }
1771 +
1772 +       return ret;
1773 +}
1774 +
1775 +int
1776 +vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1777 +                    struct drm_file *file_priv)
1778 +{
1779 +       struct drm_vc4_wait_seqno *args = data;
1780 +
1781 +       return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
1782 +                                              &args->timeout_ns);
1783 +}
1784 +
1785 +int
1786 +vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1787 +                 struct drm_file *file_priv)
1788 +{
1789 +       int ret;
1790 +       struct drm_vc4_wait_bo *args = data;
1791 +       struct drm_gem_object *gem_obj;
1792 +       struct vc4_bo *bo;
1793 +
1794 +       gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1795 +       if (!gem_obj) {
1796 +               DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
1797 +               return -EINVAL;
1798 +       }
1799 +       bo = to_vc4_bo(gem_obj);
1800 +
1801 +       ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, &args->timeout_ns);
1802 +
1803 +       drm_gem_object_unreference(gem_obj);
1804 +       return ret;
1805 +}
1806 +
1807 +/**
1808 + * Submits a command list to the VC4.
1809 + *
1810 + * This is what is called batchbuffer emitting on other hardware.
1811 + */
1812 +int
1813 +vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1814 +                   struct drm_file *file_priv)
1815 +{
1816 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1817 +       struct drm_vc4_submit_cl *args = data;
1818 +       struct vc4_exec_info *exec;
1819 +       int ret;
1820 +
1821 +       if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
1822 +               DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
1823 +               return -EINVAL;
1824 +       }
1825 +
1826 +       exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
1827 +       if (!exec) {
1828 +               DRM_ERROR("malloc failure on exec struct\n");
1829 +               return -ENOMEM;
1830 +       }
1831 +
1832 +       exec->args = args;
1833 +       INIT_LIST_HEAD(&exec->unref_list);
1834 +
1835 +       mutex_lock(&dev->struct_mutex);
1836 +
1837 +       ret = vc4_cl_lookup_bos(dev, file_priv, exec);
1838 +       if (ret)
1839 +               goto fail;
1840 +
1841 +       if (exec->args->bin_cl_size != 0) {
1842 +               ret = vc4_get_bcl(dev, exec);
1843 +               if (ret)
1844 +                       goto fail;
1845 +       } else {
1846 +               exec->ct0ca = exec->ct0ea = 0;
1847 +       }
1848 +
1849 +       ret = vc4_get_rcl(dev, exec);
1850 +       if (ret)
1851 +               goto fail;
1852 +
1853 +       /* Clear this out of the struct we'll be putting in the queue,
1854 +        * since it's part of our stack.
1855 +        */
1856 +       exec->args = NULL;
1857 +
1858 +       vc4_queue_submit(dev, exec);
1859 +
1860 +       /* Return the seqno for our job. */
1861 +       args->seqno = vc4->emit_seqno;
1862 +
1863 +       mutex_unlock(&dev->struct_mutex);
1864 +
1865 +       return 0;
1866 +
1867 +fail:
1868 +       vc4_complete_exec(exec);
1869 +
1870 +       mutex_unlock(&dev->struct_mutex);
1871 +
1872 +       return ret;
1873 +}
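
From userspace, the submit and wait ioctls above pair up roughly as follows. This is an illustrative sketch only, not part of the patch: it assumes the DRM_IOCTL_VC4_SUBMIT_CL / DRM_IOCTL_VC4_WAIT_SEQNO request macros and struct definitions from the vc4 uapi header (vc4_drm.h), libdrm's drmIoctl(), and a struct drm_vc4_submit_cl that the caller has already filled in (bin CL / shader rec / uniform / BO-handle pointers and sizes, plus the render-config fields consumed by vc4_get_rcl()).

	#include <stdint.h>
	#include <xf86drm.h>
	#include "vc4_drm.h"

	static int submit_and_wait(int fd, struct drm_vc4_submit_cl *submit)
	{
		struct drm_vc4_wait_seqno wait = {
			/* Effectively unbounded; ~0ull matches the kernel
			 * side's "don't adjust the timeout" convention.
			 */
			.timeout_ns = ~0ull,
		};
		int ret;

		/* Queue the job; the kernel writes the assigned seqno back
		 * into submit->seqno.
		 */
		ret = drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, submit);
		if (ret)
			return ret;

		/* Block until that seqno has retired. */
		wait.seqno = submit->seqno;
		return drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
	}
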
1874 +
1875 +void
1876 +vc4_gem_init(struct drm_device *dev)
1877 +{
1878 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1879 +
1880 +       INIT_LIST_HEAD(&vc4->job_list);
1881 +       INIT_LIST_HEAD(&vc4->job_done_list);
1882 +       INIT_LIST_HEAD(&vc4->seqno_cb_list);
1883 +       spin_lock_init(&vc4->job_lock);
1884 +
1885 +       INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
1886 +       setup_timer(&vc4->hangcheck.timer,
1887 +                   vc4_hangcheck_elapsed,
1888 +                   (unsigned long) dev);
1889 +
1890 +       INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
1891 +}
1892 +
1893 +void
1894 +vc4_gem_destroy(struct drm_device *dev)
1895 +{
1896 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1897 +
1898 +       /* Waiting for exec to finish would need to be done before
1899 +        * unregistering V3D.
1900 +        */
1901 +       WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1902 +
1903 +       /* V3D should already have disabled its interrupt and cleared
1904 +        * the overflow allocation registers.  Now free the object.
1905 +        */
1906 +       if (vc4->overflow_mem) {
1907 +               drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
1908 +               vc4->overflow_mem = NULL;
1909 +       }
1910 +
1911 +       vc4_bo_cache_destroy(dev);
1912 +}
1913 --- /dev/null
1914 +++ b/drivers/gpu/drm/vc4/vc4_irq.c
1915 @@ -0,0 +1,211 @@
1916 +/*
1917 + * Copyright © 2014 Broadcom
1918 + *
1919 + * Permission is hereby granted, free of charge, to any person obtaining a
1920 + * copy of this software and associated documentation files (the "Software"),
1921 + * to deal in the Software without restriction, including without limitation
1922 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1923 + * and/or sell copies of the Software, and to permit persons to whom the
1924 + * Software is furnished to do so, subject to the following conditions:
1925 + *
1926 + * The above copyright notice and this permission notice (including the next
1927 + * paragraph) shall be included in all copies or substantial portions of the
1928 + * Software.
1929 + *
1930 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1931 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1932 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1933 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1934 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1935 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1936 + * IN THE SOFTWARE.
1937 + */
1938 +
1939 +/** DOC: Interrupt management for the V3D engine.
1940 + *
1941 + * We have an interrupt status register (V3D_INTCTL) which reports
1942 + * interrupts, and where writing 1 bits clears those interrupts.
1943 + * There are also a pair of interrupt registers
1944 + * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
1945 + * disables that specific interrupt, and 0s written are ignored
1946 + * (reading either one returns the set of enabled interrupts).
1947 + *
1948 + * When we take a render frame interrupt, we need to wake the
1949 + * processes waiting for some frame to be done, and get the next frame
1950 + * submitted ASAP (so the hardware doesn't sit idle when there's work
1951 + * to do).
1952 + *
1953 + * When we take the binner out of memory interrupt, we need to
1954 + * allocate some new memory and pass it to the binner so that the
1955 + * current job can make progress.
1956 + */
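
A concrete consequence of those register semantics, as a sketch only (it reuses the V3D_WRITE() accessor and interrupt bits used throughout this file): because enable and disable live in separate registers and written zeros are ignored, masking, acking, or unmasking a single interrupt never needs a read-modify-write of an enable mask.

	V3D_WRITE(V3D_INTDIS, V3D_INT_FRDONE);	/* mask only render-done */
	/* ... section that must not race with the FRDONE handler ... */
	V3D_WRITE(V3D_INTCTL, V3D_INT_FRDONE);	/* ack anything pending */
	V3D_WRITE(V3D_INTENA, V3D_INT_FRDONE);	/* unmask it again */
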
1957 +
1958 +#include "vc4_drv.h"
1959 +#include "vc4_regs.h"
1960 +
1961 +#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
1962 +                        V3D_INT_FRDONE)
1963 +
1964 +DECLARE_WAIT_QUEUE_HEAD(render_wait);
1965 +
1966 +static void
1967 +vc4_overflow_mem_work(struct work_struct *work)
1968 +{
1969 +       struct vc4_dev *vc4 =
1970 +               container_of(work, struct vc4_dev, overflow_mem_work);
1971 +       struct drm_device *dev = vc4->dev;
1972 +       struct vc4_bo *bo;
1973 +
1974 +       bo = vc4_bo_create(dev, 256 * 1024);
1975 +       if (!bo) {
1976 +               DRM_ERROR("Couldn't allocate binner overflow mem\n");
1977 +               return;
1978 +       }
1979 +
1980 +       /* If there's a job executing currently, then our previous
1981 +        * overflow allocation is getting used in that job and we need
1982 +        * to queue it to be released when the job is done.  But if no
1983 +        * job is executing at all, then we can free the old overflow
1984 +        * object directly.
1985 +        *
1986 +        * No lock necessary for this pointer since we're the only
1987 +        * ones that update the pointer, and our workqueue won't
1988 +        * reenter.
1989 +        */
1990 +       if (vc4->overflow_mem) {
1991 +               struct vc4_exec_info *current_exec;
1992 +               unsigned long irqflags;
1993 +
1994 +               spin_lock_irqsave(&vc4->job_lock, irqflags);
1995 +               current_exec = vc4_first_job(vc4);
1996 +               if (current_exec) {
1997 +                       vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
1998 +                       list_add_tail(&vc4->overflow_mem->unref_head,
1999 +                                     &current_exec->unref_list);
2000 +                       vc4->overflow_mem = NULL;
2001 +               }
2002 +               spin_unlock_irqrestore(&vc4->job_lock, irqflags);
2003 +       }
2004 +
2005 +       if (vc4->overflow_mem) {
2006 +               drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
2007 +       }
2008 +       vc4->overflow_mem = bo;
2009 +
2010 +       V3D_WRITE(V3D_BPOA, bo->base.paddr);
2011 +       V3D_WRITE(V3D_BPOS, bo->base.base.size);
2012 +       V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
2013 +       V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
2014 +}
2015 +
2016 +static void
2017 +vc4_irq_finish_job(struct drm_device *dev)
2018 +{
2019 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2020 +       struct vc4_exec_info *exec = vc4_first_job(vc4);
2021 +
2022 +       if (!exec)
2023 +               return;
2024 +
2025 +       vc4->finished_seqno++;
2026 +       list_move_tail(&exec->head, &vc4->job_done_list);
2027 +       vc4_submit_next_job(dev);
2028 +
2029 +       wake_up_all(&vc4->job_wait_queue);
2030 +       schedule_work(&vc4->job_done_work);
2031 +}
2032 +
2033 +irqreturn_t
2034 +vc4_irq(int irq, void *arg)
2035 +{
2036 +       struct drm_device *dev = arg;
2037 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2038 +       uint32_t intctl;
2039 +       irqreturn_t status = IRQ_NONE;
2040 +
2041 +       barrier();
2042 +       intctl = V3D_READ(V3D_INTCTL);
2043 +
2044 +       /* Acknowledge the interrupts we're handling here. The render
2045 +        * frame done interrupt will be cleared, while OUTOMEM will
2046 +        * stay high until the underlying cause is cleared.
2047 +        */
2048 +       V3D_WRITE(V3D_INTCTL, intctl);
2049 +
2050 +       if (intctl & V3D_INT_OUTOMEM) {
2051 +               /* Disable OUTOMEM until the work is done. */
2052 +               V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
2053 +               schedule_work(&vc4->overflow_mem_work);
2054 +               status = IRQ_HANDLED;
2055 +       }
2056 +
2057 +       if (intctl & V3D_INT_FRDONE) {
2058 +               spin_lock(&vc4->job_lock);
2059 +               vc4_irq_finish_job(dev);
2060 +               spin_unlock(&vc4->job_lock);
2061 +               status = IRQ_HANDLED;
2062 +       }
2063 +
2064 +       return status;
2065 +}
2066 +
2067 +void
2068 +vc4_irq_preinstall(struct drm_device *dev)
2069 +{
2070 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2071 +
2072 +       init_waitqueue_head(&vc4->job_wait_queue);
2073 +       INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
2074 +
2075 +       /* Clear any pending interrupts someone might have left around
2076 +        * for us.
2077 +        */
2078 +       V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2079 +}
2080 +
2081 +int
2082 +vc4_irq_postinstall(struct drm_device *dev)
2083 +{
2084 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2085 +
2086 +       /* Enable both the render done and out of memory interrupts. */
2087 +       V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
2088 +
2089 +       return 0;
2090 +}
2091 +
2092 +void
2093 +vc4_irq_uninstall(struct drm_device *dev)
2094 +{
2095 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2096 +
2097 +       /* Disable sending interrupts for our driver's IRQs. */
2098 +       V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
2099 +
2100 +       /* Clear any pending interrupts we might have left. */
2101 +       V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2102 +
2103 +       cancel_work_sync(&vc4->overflow_mem_work);
2104 +}
2105 +
2106 +/** Reinitializes interrupt registers when a GPU reset is performed. */
2107 +void vc4_irq_reset(struct drm_device *dev)
2108 +{
2109 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2110 +       unsigned long irqflags;
2111 +
2112 +       /* Acknowledge any stale IRQs. */
2113 +       V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2114 +
2115 +       /*
2116 +        * Turn all our interrupts on.  Binner out of memory is the
2117 +        * only one we expect to trigger at this point, since we've
2118 +        * just come from poweron and haven't supplied any overflow
2119 +        * memory yet.
2120 +        */
2121 +       V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
2122 +
2123 +       spin_lock_irqsave(&vc4->job_lock, irqflags);
2124 +       vc4_irq_finish_job(dev);
2125 +       spin_unlock_irqrestore(&vc4->job_lock, irqflags);
2126 +}
2127 --- a/drivers/gpu/drm/vc4/vc4_kms.c
2128 +++ b/drivers/gpu/drm/vc4/vc4_kms.c
2129 @@ -15,6 +15,7 @@
2130   */
2131  
2132  #include "drm_crtc.h"
2133 +#include "drm_atomic.h"
2134  #include "drm_atomic_helper.h"
2135  #include "drm_crtc_helper.h"
2136  #include "drm_plane_helper.h"
2137 @@ -29,10 +30,151 @@ static void vc4_output_poll_changed(stru
2138                 drm_fbdev_cma_hotplug_event(vc4->fbdev);
2139  }
2140  
2141 +struct vc4_commit {
2142 +       struct drm_device *dev;
2143 +       struct drm_atomic_state *state;
2144 +       struct vc4_seqno_cb cb;
2145 +};
2146 +
2147 +static void
2148 +vc4_atomic_complete_commit(struct vc4_commit *c)
2149 +{
2150 +       struct drm_atomic_state *state = c->state;
2151 +       struct drm_device *dev = state->dev;
2152 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2153 +
2154 +       drm_atomic_helper_commit_modeset_disables(dev, state);
2155 +
2156 +       drm_atomic_helper_commit_planes(dev, state);
2157 +
2158 +       drm_atomic_helper_commit_modeset_enables(dev, state);
2159 +
2160 +       drm_atomic_helper_wait_for_vblanks(dev, state);
2161 +
2162 +       drm_atomic_helper_cleanup_planes(dev, state);
2163 +
2164 +       drm_atomic_state_free(state);
2165 +
2166 +       up(&vc4->async_modeset);
2167 +
2168 +       kfree(c);
2169 +}
2170 +
2171 +static void
2172 +vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
2173 +{
2174 +       struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
2175 +
2176 +       vc4_atomic_complete_commit(c);
2177 +}
2178 +
2179 +static struct vc4_commit *commit_init(struct drm_atomic_state *state)
2180 +{
2181 +       struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
2182 +
2183 +       if (!c)
2184 +               return NULL;
2185 +       c->dev = state->dev;
2186 +       c->state = state;
2187 +
2188 +       return c;
2189 +}
2190 +
2191 +/**
2192 + * vc4_atomic_commit - commit validated state object
2193 + * @dev: DRM device
2194 + * @state: the driver state object
2195 + * @async: asynchronous commit
2196 + *
2197 + * This function commits a state object that has been pre-validated with
2198 + * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
2199 + * reservation fails. Asynchronous commits complete from a seqno callback.
2200 + *
2201 + * RETURNS
2202 + * Zero for success or -errno.
2203 + */
2204 +static int vc4_atomic_commit(struct drm_device *dev,
2205 +                            struct drm_atomic_state *state,
2206 +                            bool async)
2207 +{
2208 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2209 +       int ret;
2210 +       int i;
2211 +       uint64_t wait_seqno = 0;
2212 +       struct vc4_commit *c;
2213 +
2214 +       c = commit_init(state);
2215 +       if (!c)
2216 +               return -ENOMEM;
2217 +
2218 +       /* Make sure that any outstanding modesets have finished. */
2219 +       ret = down_interruptible(&vc4->async_modeset);
2220 +       if (ret) {
2221 +               kfree(c);
2222 +               return ret;
2223 +       }
2224 +
2225 +       ret = drm_atomic_helper_prepare_planes(dev, state);
2226 +       if (ret) {
2227 +               kfree(c);
2228 +               up(&vc4->async_modeset);
2229 +               return ret;
2230 +       }
2231 +
2232 +       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
2233 +               struct drm_plane *plane = state->planes[i];
2234 +               struct drm_plane_state *new_state = state->plane_states[i];
2235 +
2236 +               if (!plane)
2237 +                       continue;
2238 +
2239 +               if ((plane->state->fb != new_state->fb) && new_state->fb) {
2240 +                       struct drm_gem_cma_object *cma_bo =
2241 +                               drm_fb_cma_get_gem_obj(new_state->fb, 0);
2242 +                       struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
2243 +                       wait_seqno = max(bo->seqno, wait_seqno);
2244 +               }
2245 +       }
2246 +
2247 +       /*
2248 +        * This is the point of no return - everything below never fails except
2249 +        * when the hw goes bonghits. Which means we can commit the new state on
2250 +        * the software side now.
2251 +        */
2252 +
2253 +       drm_atomic_helper_swap_state(dev, state);
2254 +
2255 +       /*
2256 +        * Everything below can be run asynchronously without the need to grab
2257 +        * any modeset locks at all under one condition: It must be guaranteed
2258 +        * that the asynchronous work has either been cancelled (if the driver
2259 +        * supports it, which at least requires that the framebuffers get
2260 +        * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2261 +        * before the new state gets committed on the software side with
2262 +        * drm_atomic_helper_swap_state().
2263 +        *
2264 +        * This scheme allows new atomic state updates to be prepared and
2265 +        * checked in parallel to the asynchronous completion of the previous
2266 +        * update. Which is important since compositors need to figure out the
2267 +        * composition of the next frame right after having submitted the
2268 +        * current layout.
2269 +        */
2270 +
2271 +       if (async) {
2272 +               vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
2273 +                                  vc4_atomic_complete_commit_seqno_cb);
2274 +       } else {
2275 +               vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
2276 +               vc4_atomic_complete_commit(c);
2277 +       }
2278 +
2279 +       return 0;
2280 +}
2281 +
2282  static const struct drm_mode_config_funcs vc4_mode_funcs = {
2283         .output_poll_changed = vc4_output_poll_changed,
2284         .atomic_check = drm_atomic_helper_check,
2285 -       .atomic_commit = drm_atomic_helper_commit,
2286 +       .atomic_commit = vc4_atomic_commit,
2287         .fb_create = drm_fb_cma_create,
2288  };
2289  
2290 @@ -41,6 +183,8 @@ int vc4_kms_load(struct drm_device *dev)
2291         struct vc4_dev *vc4 = to_vc4_dev(dev);
2292         int ret;
2293  
2294 +       sema_init(&vc4->async_modeset, 1);
2295 +
2296         ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
2297         if (ret < 0) {
2298                 dev_err(dev->dev, "failed to initialize vblank\n");
2299 @@ -51,6 +195,8 @@ int vc4_kms_load(struct drm_device *dev)
2300         dev->mode_config.max_height = 2048;
2301         dev->mode_config.funcs = &vc4_mode_funcs;
2302         dev->mode_config.preferred_depth = 24;
2303 +       dev->mode_config.async_page_flip = true;
2304 +
2305         dev->vblank_disable_allowed = true;
2306  
2307         drm_mode_config_reset(dev);
2308 --- /dev/null
2309 +++ b/drivers/gpu/drm/vc4/vc4_packet.h
2310 @@ -0,0 +1,384 @@
2311 +/*
2312 + * Copyright © 2014 Broadcom
2313 + *
2314 + * Permission is hereby granted, free of charge, to any person obtaining a
2315 + * copy of this software and associated documentation files (the "Software"),
2316 + * to deal in the Software without restriction, including without limitation
2317 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2318 + * and/or sell copies of the Software, and to permit persons to whom the
2319 + * Software is furnished to do so, subject to the following conditions:
2320 + *
2321 + * The above copyright notice and this permission notice (including the next
2322 + * paragraph) shall be included in all copies or substantial portions of the
2323 + * Software.
2324 + *
2325 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2326 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2327 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
2328 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2329 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2330 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2331 + * IN THE SOFTWARE.
2332 + */
2333 +
2334 +#ifndef VC4_PACKET_H
2335 +#define VC4_PACKET_H
2336 +
2337 +#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
2338 +
2339 +enum vc4_packet {
2340 +        VC4_PACKET_HALT = 0,
2341 +        VC4_PACKET_NOP = 1,
2342 +
2343 +        VC4_PACKET_FLUSH = 4,
2344 +        VC4_PACKET_FLUSH_ALL = 5,
2345 +        VC4_PACKET_START_TILE_BINNING = 6,
2346 +        VC4_PACKET_INCREMENT_SEMAPHORE = 7,
2347 +        VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
2348 +
2349 +        VC4_PACKET_BRANCH = 16,
2350 +        VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
2351 +
2352 +        VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
2353 +        VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
2354 +        VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
2355 +        VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
2356 +        VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
2357 +        VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
2358 +
2359 +        VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
2360 +        VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
2361 +
2362 +        VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
2363 +        VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
2364 +
2365 +        VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
2366 +
2367 +        VC4_PACKET_GL_SHADER_STATE = 64,
2368 +        VC4_PACKET_NV_SHADER_STATE = 65,
2369 +        VC4_PACKET_VG_SHADER_STATE = 66,
2370 +
2371 +        VC4_PACKET_CONFIGURATION_BITS = 96,
2372 +        VC4_PACKET_FLAT_SHADE_FLAGS = 97,
2373 +        VC4_PACKET_POINT_SIZE = 98,
2374 +        VC4_PACKET_LINE_WIDTH = 99,
2375 +        VC4_PACKET_RHT_X_BOUNDARY = 100,
2376 +        VC4_PACKET_DEPTH_OFFSET = 101,
2377 +        VC4_PACKET_CLIP_WINDOW = 102,
2378 +        VC4_PACKET_VIEWPORT_OFFSET = 103,
2379 +        VC4_PACKET_Z_CLIPPING = 104,
2380 +        VC4_PACKET_CLIPPER_XY_SCALING = 105,
2381 +        VC4_PACKET_CLIPPER_Z_SCALING = 106,
2382 +
2383 +        VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
2384 +        VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
2385 +        VC4_PACKET_CLEAR_COLORS = 114,
2386 +        VC4_PACKET_TILE_COORDINATES = 115,
2387 +
2388 +        /* Not an actual hardware packet -- this is what we use to put
2389 +         * references to GEM bos in the command stream, since we need the u32
2390 +         * in the actual address packet in order to store the offset from the
2391 +         * start of the BO.
2392 +         */
2393 +        VC4_PACKET_GEM_HANDLES = 254,
2394 +} __attribute__ ((__packed__));
2395 +
2396 +#define VC4_PACKET_HALT_SIZE                                           1
2397 +#define VC4_PACKET_NOP_SIZE                                            1
2398 +#define VC4_PACKET_FLUSH_SIZE                                          1
2399 +#define VC4_PACKET_FLUSH_ALL_SIZE                                      1
2400 +#define VC4_PACKET_START_TILE_BINNING_SIZE                             1
2401 +#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE                            1
2402 +#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE                              1
2403 +#define VC4_PACKET_BRANCH_SIZE                                         5
2404 +#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE                             5
2405 +#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE                           1
2406 +#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE                   1
2407 +#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE                     5
2408 +#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE                      5
2409 +#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE                      7
2410 +#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE                       7
2411 +#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE                           14
2412 +#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE                             10
2413 +#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE                           1
2414 +#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE                   1
2415 +#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE                          2
2416 +#define VC4_PACKET_GL_SHADER_STATE_SIZE                                        5
2417 +#define VC4_PACKET_NV_SHADER_STATE_SIZE                                        5
2418 +#define VC4_PACKET_VG_SHADER_STATE_SIZE                                        5
2419 +#define VC4_PACKET_CONFIGURATION_BITS_SIZE                             4
2420 +#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE                               5
2421 +#define VC4_PACKET_POINT_SIZE_SIZE                                     5
2422 +#define VC4_PACKET_LINE_WIDTH_SIZE                                     5
2423 +#define VC4_PACKET_RHT_X_BOUNDARY_SIZE                                 3
2424 +#define VC4_PACKET_DEPTH_OFFSET_SIZE                                   5
2425 +#define VC4_PACKET_CLIP_WINDOW_SIZE                                    9
2426 +#define VC4_PACKET_VIEWPORT_OFFSET_SIZE                                        5
2427 +#define VC4_PACKET_Z_CLIPPING_SIZE                                     9
2428 +#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE                             9
2429 +#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE                              9
2430 +#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE                       16
2431 +#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE                     11
2432 +#define VC4_PACKET_CLEAR_COLORS_SIZE                                   14
2433 +#define VC4_PACKET_TILE_COORDINATES_SIZE                               3
2434 +#define VC4_PACKET_GEM_HANDLES_SIZE                                    9
2435 +
2436 +/** @{
2437 + * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2438 + * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
2439 + */
2440 +#define VC4_TILING_FORMAT_LINEAR    0
2441 +#define VC4_TILING_FORMAT_T         1
2442 +#define VC4_TILING_FORMAT_LT        2
2443 +/** @} */
2444 +
2445 +/** @{
2446 + *
2447 + * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
2448 + * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
2449 + */
2450 +#define VC4_LOADSTORE_FULL_RES_EOF                     (1 << 3)
2451 +#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL       (1 << 2)
2452 +#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS              (1 << 1)
2453 +#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR           (1 << 0)
2454 +
2455 +/** @{
2456 + *
2457 + * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2458 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
2459 + */
2460 +
2461 +#define VC4_LOADSTORE_TILE_BUFFER_EOF                  (1 << 3)
2462 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK (1 << 2)
2463 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS      (1 << 1)
2464 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR   (1 << 0)
2465 +
2466 +/** @} */
2467 +
2468 +/** @{
2469 + *
2470 + * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2471 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
2472 + */
2473 +#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR (1 << 15)
2474 +#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR     (1 << 14)
2475 +#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR  (1 << 13)
2476 +#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP         (1 << 12)
2477 +
2478 +#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK      VC4_MASK(9, 8)
2479 +#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT     8
2480 +#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888         0
2481 +#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER    1
2482 +#define VC4_LOADSTORE_TILE_BUFFER_BGR565           2
2483 +/** @} */
2484 +
2485 +/** @{
2486 + *
2487 + * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2488 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
2489 + */
2490 +#define VC4_STORE_TILE_BUFFER_MODE_MASK            VC4_MASK(7, 6)
2491 +#define VC4_STORE_TILE_BUFFER_MODE_SHIFT           6
2492 +#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0         (0 << 6)
2493 +#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4     (1 << 6)
2494 +#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16    (2 << 6)
2495 +
2496 +/** The values of the field are VC4_TILING_FORMAT_* */
2497 +#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK      VC4_MASK(5, 4)
2498 +#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT     4
2499 +
2500 +#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK      VC4_MASK(2, 0)
2501 +#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT     0
2502 +#define VC4_LOADSTORE_TILE_BUFFER_NONE             0
2503 +#define VC4_LOADSTORE_TILE_BUFFER_COLOR            1
2504 +#define VC4_LOADSTORE_TILE_BUFFER_ZS               2
2505 +#define VC4_LOADSTORE_TILE_BUFFER_Z                3
2506 +#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK          4
2507 +#define VC4_LOADSTORE_TILE_BUFFER_FULL             5
2508 +/** @} */
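
As a usage sketch (hypothetical, not part of the header): these MASK/SHIFT pairs are meant to be combined with the VC4_SET_FIELD() helper pulled in from vc4_regs.h above, e.g. to build byte 0 of a store/load-general packet that targets the color buffer in T-format tiling:

	uint8_t byte0 = VC4_SET_FIELD(VC4_TILING_FORMAT_T,
				      VC4_LOADSTORE_TILE_BUFFER_TILING) |
			VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_COLOR,
				      VC4_LOADSTORE_TILE_BUFFER_BUFFER);
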
2509 +
2510 +#define VC4_INDEX_BUFFER_U8                        (0 << 4)
2511 +#define VC4_INDEX_BUFFER_U16                       (1 << 4)
2512 +
2513 +/* This flag is only present in NV shader state. */
2514 +#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS         (1 << 3)
2515 +#define VC4_SHADER_FLAG_ENABLE_CLIPPING            (1 << 2)
2516 +#define VC4_SHADER_FLAG_VS_POINT_SIZE              (1 << 1)
2517 +#define VC4_SHADER_FLAG_FS_SINGLE_THREAD           (1 << 0)
2518 +
2519 +/** @{ byte 2 of config bits. */
2520 +#define VC4_CONFIG_BITS_EARLY_Z_UPDATE             (1 << 1)
2521 +#define VC4_CONFIG_BITS_EARLY_Z                    (1 << 0)
2522 +/** @} */
2523 +
2524 +/** @{ byte 1 of config bits. */
2525 +#define VC4_CONFIG_BITS_Z_UPDATE                   (1 << 7)
2526 +/** same values in this 3-bit field as PIPE_FUNC_* */
2527 +#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT           4
2528 +#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE        (1 << 3)
2529 +
2530 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO    (0 << 1)
2531 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD        (1 << 1)
2532 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR         (2 << 1)
2533 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO       (3 << 1)
2534 +
2535 +#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT       (1 << 0)
2536 +/** @} */
2537 +
2538 +/** @{ byte 0 of config bits. */
2539 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
2540 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X   (1 << 6)
2541 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X  (2 << 6)
2542 +
2543 +#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES        (1 << 4)
2544 +#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET        (1 << 3)
2545 +#define VC4_CONFIG_BITS_CW_PRIMITIVES              (1 << 2)
2546 +#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK           (1 << 1)
2547 +#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT          (1 << 0)
2548 +/** @} */
2549 +
2550 +/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
2551 +#define VC4_BIN_CONFIG_DB_NON_MS                   (1 << 7)
2552 +
2553 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK       VC4_MASK(6, 5)
2554 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT      5
2555 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32         0
2556 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64         1
2557 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128        2
2558 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256        3
2559 +
2560 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK  VC4_MASK(4, 3)
2561 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
2562 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32    0
2563 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64    1
2564 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128   2
2565 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256   3
2566 +
2567 +#define VC4_BIN_CONFIG_AUTO_INIT_TSDA              (1 << 2)
2568 +#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT           (1 << 1)
2569 +#define VC4_BIN_CONFIG_MS_MODE_4X                  (1 << 0)
2570 +/** @} */
2571 +
2572 +/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
2573 +#define VC4_RENDER_CONFIG_DB_NON_MS                (1 << 12)
2574 +#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE (1 << 11)
2575 +#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G      (1 << 10)
2576 +#define VC4_RENDER_CONFIG_COVERAGE_MODE            (1 << 9)
2577 +#define VC4_RENDER_CONFIG_ENABLE_VG_MASK           (1 << 8)
2578 +
2579 +/** The values of the field are VC4_TILING_FORMAT_* */
2580 +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK       VC4_MASK(7, 6)
2581 +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT      6
2582 +
2583 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X         (0 << 4)
2584 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X         (1 << 4)
2585 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X        (2 << 4)
2586 +
2587 +#define VC4_RENDER_CONFIG_FORMAT_MASK              VC4_MASK(3, 2)
2588 +#define VC4_RENDER_CONFIG_FORMAT_SHIFT             2
2589 +#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED   0
2590 +#define VC4_RENDER_CONFIG_FORMAT_RGBA8888          1
2591 +#define VC4_RENDER_CONFIG_FORMAT_BGR565            2
2592 +
2593 +#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT        (1 << 1)
2594 +#define VC4_RENDER_CONFIG_MS_MODE_4X               (1 << 0)
2595 +
2596 +#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX         (1 << 4)
2597 +#define VC4_PRIMITIVE_LIST_FORMAT_32_XY            (3 << 4)
2598 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS      (0 << 0)
2599 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES       (1 << 0)
2600 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES   (2 << 0)
2601 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT         (3 << 0)
2602 +
2603 +enum vc4_texture_data_type {
2604 +        VC4_TEXTURE_TYPE_RGBA8888 = 0,
2605 +        VC4_TEXTURE_TYPE_RGBX8888 = 1,
2606 +        VC4_TEXTURE_TYPE_RGBA4444 = 2,
2607 +        VC4_TEXTURE_TYPE_RGBA5551 = 3,
2608 +        VC4_TEXTURE_TYPE_RGB565 = 4,
2609 +        VC4_TEXTURE_TYPE_LUMINANCE = 5,
2610 +        VC4_TEXTURE_TYPE_ALPHA = 6,
2611 +        VC4_TEXTURE_TYPE_LUMALPHA = 7,
2612 +        VC4_TEXTURE_TYPE_ETC1 = 8,
2613 +        VC4_TEXTURE_TYPE_S16F = 9,
2614 +        VC4_TEXTURE_TYPE_S8 = 10,
2615 +        VC4_TEXTURE_TYPE_S16 = 11,
2616 +        VC4_TEXTURE_TYPE_BW1 = 12,
2617 +        VC4_TEXTURE_TYPE_A4 = 13,
2618 +        VC4_TEXTURE_TYPE_A1 = 14,
2619 +        VC4_TEXTURE_TYPE_RGBA64 = 15,
2620 +        VC4_TEXTURE_TYPE_RGBA32R = 16,
2621 +        VC4_TEXTURE_TYPE_YUV422R = 17,
2622 +};
2623 +
2624 +#define VC4_TEX_P0_OFFSET_MASK                     VC4_MASK(31, 12)
2625 +#define VC4_TEX_P0_OFFSET_SHIFT                    12
2626 +#define VC4_TEX_P0_CSWIZ_MASK                      VC4_MASK(11, 10)
2627 +#define VC4_TEX_P0_CSWIZ_SHIFT                     10
2628 +#define VC4_TEX_P0_CMMODE_MASK                     VC4_MASK(9, 9)
2629 +#define VC4_TEX_P0_CMMODE_SHIFT                    9
2630 +#define VC4_TEX_P0_FLIPY_MASK                      VC4_MASK(8, 8)
2631 +#define VC4_TEX_P0_FLIPY_SHIFT                     8
2632 +#define VC4_TEX_P0_TYPE_MASK                       VC4_MASK(7, 4)
2633 +#define VC4_TEX_P0_TYPE_SHIFT                      4
2634 +#define VC4_TEX_P0_MIPLVLS_MASK                    VC4_MASK(3, 0)
2635 +#define VC4_TEX_P0_MIPLVLS_SHIFT                   0
2636 +
2637 +#define VC4_TEX_P1_TYPE4_MASK                      VC4_MASK(31, 31)
2638 +#define VC4_TEX_P1_TYPE4_SHIFT                     31
2639 +#define VC4_TEX_P1_HEIGHT_MASK                     VC4_MASK(30, 20)
2640 +#define VC4_TEX_P1_HEIGHT_SHIFT                    20
2641 +#define VC4_TEX_P1_ETCFLIP_MASK                    VC4_MASK(19, 19)
2642 +#define VC4_TEX_P1_ETCFLIP_SHIFT                   19
2643 +#define VC4_TEX_P1_WIDTH_MASK                      VC4_MASK(18, 8)
2644 +#define VC4_TEX_P1_WIDTH_SHIFT                     8
2645 +
2646 +#define VC4_TEX_P1_MAGFILT_MASK                    VC4_MASK(7, 7)
2647 +#define VC4_TEX_P1_MAGFILT_SHIFT                   7
2648 +# define VC4_TEX_P1_MAGFILT_LINEAR                 0
2649 +# define VC4_TEX_P1_MAGFILT_NEAREST                1
2650 +
2651 +#define VC4_TEX_P1_MINFILT_MASK                    VC4_MASK(6, 4)
2652 +#define VC4_TEX_P1_MINFILT_SHIFT                   4
2653 +# define VC4_TEX_P1_MINFILT_LINEAR                 0
2654 +# define VC4_TEX_P1_MINFILT_NEAREST                1
2655 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR          2
2656 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN           3
2657 +# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR           4
2658 +# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN            5
2659 +
2660 +#define VC4_TEX_P1_WRAP_T_MASK                     VC4_MASK(3, 2)
2661 +#define VC4_TEX_P1_WRAP_T_SHIFT                    2
2662 +#define VC4_TEX_P1_WRAP_S_MASK                     VC4_MASK(1, 0)
2663 +#define VC4_TEX_P1_WRAP_S_SHIFT                    0
2664 +# define VC4_TEX_P1_WRAP_REPEAT                    0
2665 +# define VC4_TEX_P1_WRAP_CLAMP                     1
2666 +# define VC4_TEX_P1_WRAP_MIRROR                    2
2667 +# define VC4_TEX_P1_WRAP_BORDER                    3
2668 +
2669 +#define VC4_TEX_P2_PTYPE_MASK                      VC4_MASK(31, 30)
2670 +#define VC4_TEX_P2_PTYPE_SHIFT                     30
2671 +# define VC4_TEX_P2_PTYPE_IGNORED                  0
2672 +# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE          1
2673 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS   2
2674 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS      3
2675 +
2676 +/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
2677 +#define VC4_TEX_P2_CMST_MASK                       VC4_MASK(29, 12)
2678 +#define VC4_TEX_P2_CMST_SHIFT                      12
2679 +#define VC4_TEX_P2_BSLOD_MASK                      VC4_MASK(0, 0)
2680 +#define VC4_TEX_P2_BSLOD_SHIFT                     0
2681 +
2682 +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
2683 +#define VC4_TEX_P2_CHEIGHT_MASK                    VC4_MASK(22, 12)
2684 +#define VC4_TEX_P2_CHEIGHT_SHIFT                   12
2685 +#define VC4_TEX_P2_CWIDTH_MASK                     VC4_MASK(10, 0)
2686 +#define VC4_TEX_P2_CWIDTH_SHIFT                    0
2687 +
2688 +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
2689 +#define VC4_TEX_P2_CYOFF_MASK                      VC4_MASK(22, 12)
2690 +#define VC4_TEX_P2_CYOFF_SHIFT                     12
2691 +#define VC4_TEX_P2_CXOFF_MASK                      VC4_MASK(10, 0)
2692 +#define VC4_TEX_P2_CXOFF_SHIFT                     0
2693 +
2694 +#endif /* VC4_PACKET_H */
2695 --- a/drivers/gpu/drm/vc4/vc4_plane.c
2696 +++ b/drivers/gpu/drm/vc4/vc4_plane.c
2697 @@ -29,6 +29,14 @@ struct vc4_plane_state {
2698         u32 *dlist;
2699         u32 dlist_size; /* Number of dwords in allocated for the display list */
2700         u32 dlist_count; /* Number of used dwords in the display list. */
2701 +
2702 +       /* Offset in the dlist to pointer word 0. */
2703 +       u32 pw0_offset;
2704 +
2705 +       /* Offset where the plane's dlist was last stored in the
2706 +        * hardware at vc4_crtc_atomic_flush() time.
2707 +        */
2708 +       u32 *hw_dlist;
2709  };
2710  
2711  static inline struct vc4_plane_state *
2712 @@ -207,6 +215,8 @@ static int vc4_plane_mode_set(struct drm
2713         /* Position Word 3: Context.  Written by the HVS. */
2714         vc4_dlist_write(vc4_state, 0xc0c0c0c0);
2715  
2716 +       vc4_state->pw0_offset = vc4_state->dlist_count;
2717 +
2718         /* Pointer Word 0: RGB / Y Pointer */
2719         vc4_dlist_write(vc4_state, bo->paddr + offset);
2720  
2721 @@ -258,6 +268,8 @@ u32 vc4_plane_write_dlist(struct drm_pla
2722         struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
2723         int i;
2724  
2725 +       vc4_state->hw_dlist = dlist;
2726 +
2727         /* Can't memcpy_toio() because it needs to be 32-bit writes. */
2728         for (i = 0; i < vc4_state->dlist_count; i++)
2729                 writel(vc4_state->dlist[i], &dlist[i]);
2730 @@ -272,6 +284,34 @@ u32 vc4_plane_dlist_size(struct drm_plan
2731         return vc4_state->dlist_count;
2732  }
2733  
2734 +/* Updates the plane to immediately (well, once the FIFO needs
2735 + * refilling) scan out from a new framebuffer.
2736 + */
2737 +void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
2738 +{
2739 +       struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
2740 +       struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
2741 +       uint32_t addr;
2742 +
2743 +       /* We're skipping the address adjustment for negative origin,
2744 +        * because this is only called on the primary plane.
2745 +        */
2746 +       WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
2747 +       addr = bo->paddr + fb->offsets[0];
2748 +
2749 +       /* Write the new address into the hardware immediately.  The
2750 +        * scanout will start from this address as soon as the FIFO
2751 +        * needs to refill with pixels.
2752 +        */
2753 +       writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
2754 +
2755 +       /* Also update the CPU-side dlist copy, so that any later
2756 +        * atomic updates that don't do a new modeset on our plane
2757 +        * also use our updated address.
2758 +        */
2759 +       vc4_state->dlist[vc4_state->pw0_offset] = addr;
2760 +}
2761 +
2762  static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
2763         .prepare_fb = NULL,
2764         .cleanup_fb = NULL,
2765 --- /dev/null
2766 +++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
2767 @@ -0,0 +1,268 @@
2768 +/*
2769 + * Copyright © 2014 Broadcom
2770 + *
2771 + * Permission is hereby granted, free of charge, to any person obtaining a
2772 + * copy of this software and associated documentation files (the "Software"),
2773 + * to deal in the Software without restriction, including without limitation
2774 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2775 + * and/or sell copies of the Software, and to permit persons to whom the
2776 + * Software is furnished to do so, subject to the following conditions:
2777 + *
2778 + * The above copyright notice and this permission notice (including the next
2779 + * paragraph) shall be included in all copies or substantial portions of the
2780 + * Software.
2781 + *
2782 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2783 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2784 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
2785 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2786 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2787 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2788 + * IN THE SOFTWARE.
2789 + */
2790 +
2791 +#ifndef VC4_QPU_DEFINES_H
2792 +#define VC4_QPU_DEFINES_H
2793 +
2794 +enum qpu_op_add {
2795 +        QPU_A_NOP,
2796 +        QPU_A_FADD,
2797 +        QPU_A_FSUB,
2798 +        QPU_A_FMIN,
2799 +        QPU_A_FMAX,
2800 +        QPU_A_FMINABS,
2801 +        QPU_A_FMAXABS,
2802 +        QPU_A_FTOI,
2803 +        QPU_A_ITOF,
2804 +        QPU_A_ADD = 12,
2805 +        QPU_A_SUB,
2806 +        QPU_A_SHR,
2807 +        QPU_A_ASR,
2808 +        QPU_A_ROR,
2809 +        QPU_A_SHL,
2810 +        QPU_A_MIN,
2811 +        QPU_A_MAX,
2812 +        QPU_A_AND,
2813 +        QPU_A_OR,
2814 +        QPU_A_XOR,
2815 +        QPU_A_NOT,
2816 +        QPU_A_CLZ,
2817 +        QPU_A_V8ADDS = 30,
2818 +        QPU_A_V8SUBS = 31,
2819 +};
2820 +
2821 +enum qpu_op_mul {
2822 +        QPU_M_NOP,
2823 +        QPU_M_FMUL,
2824 +        QPU_M_MUL24,
2825 +        QPU_M_V8MULD,
2826 +        QPU_M_V8MIN,
2827 +        QPU_M_V8MAX,
2828 +        QPU_M_V8ADDS,
2829 +        QPU_M_V8SUBS,
2830 +};
2831 +
2832 +enum qpu_raddr {
2833 +        QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
2834 +        /* 0-31 are the plain regfile a or b fields */
2835 +        QPU_R_UNIF = 32,
2836 +        QPU_R_VARY = 35,
2837 +        QPU_R_ELEM_QPU = 38,
2838 +        QPU_R_NOP,
2839 +        QPU_R_XY_PIXEL_COORD = 41,
2840 +        QPU_R_MS_REV_FLAGS = 41,
2841 +        QPU_R_VPM = 48,
2842 +        QPU_R_VPM_LD_BUSY,
2843 +        QPU_R_VPM_LD_WAIT,
2844 +        QPU_R_MUTEX_ACQUIRE,
2845 +};
2846 +
2847 +enum qpu_waddr {
2848 +        /* 0-31 are the plain regfile a or b fields */
2849 +        QPU_W_ACC0 = 32, /* aka r0 */
2850 +        QPU_W_ACC1,
2851 +        QPU_W_ACC2,
2852 +        QPU_W_ACC3,
2853 +        QPU_W_TMU_NOSWAP,
2854 +        QPU_W_ACC5,
2855 +        QPU_W_HOST_INT,
2856 +        QPU_W_NOP,
2857 +        QPU_W_UNIFORMS_ADDRESS,
2858 +        QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
2859 +        QPU_W_MS_FLAGS = 42,
2860 +        QPU_W_REV_FLAG = 42,
2861 +        QPU_W_TLB_STENCIL_SETUP = 43,
2862 +        QPU_W_TLB_Z,
2863 +        QPU_W_TLB_COLOR_MS,
2864 +        QPU_W_TLB_COLOR_ALL,
2865 +        QPU_W_TLB_ALPHA_MASK,
2866 +        QPU_W_VPM,
2867 +        QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
2868 +        QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
2869 +        QPU_W_MUTEX_RELEASE,
2870 +        QPU_W_SFU_RECIP,
2871 +        QPU_W_SFU_RECIPSQRT,
2872 +        QPU_W_SFU_EXP,
2873 +        QPU_W_SFU_LOG,
2874 +        QPU_W_TMU0_S,
2875 +        QPU_W_TMU0_T,
2876 +        QPU_W_TMU0_R,
2877 +        QPU_W_TMU0_B,
2878 +        QPU_W_TMU1_S,
2879 +        QPU_W_TMU1_T,
2880 +        QPU_W_TMU1_R,
2881 +        QPU_W_TMU1_B,
2882 +};
2883 +
2884 +enum qpu_sig_bits {
2885 +        QPU_SIG_SW_BREAKPOINT,
2886 +        QPU_SIG_NONE,
2887 +        QPU_SIG_THREAD_SWITCH,
2888 +        QPU_SIG_PROG_END,
2889 +        QPU_SIG_WAIT_FOR_SCOREBOARD,
2890 +        QPU_SIG_SCOREBOARD_UNLOCK,
2891 +        QPU_SIG_LAST_THREAD_SWITCH,
2892 +        QPU_SIG_COVERAGE_LOAD,
2893 +        QPU_SIG_COLOR_LOAD,
2894 +        QPU_SIG_COLOR_LOAD_END,
2895 +        QPU_SIG_LOAD_TMU0,
2896 +        QPU_SIG_LOAD_TMU1,
2897 +        QPU_SIG_ALPHA_MASK_LOAD,
2898 +        QPU_SIG_SMALL_IMM,
2899 +        QPU_SIG_LOAD_IMM,
2900 +        QPU_SIG_BRANCH
2901 +};
2902 +
2903 +enum qpu_mux {
2904 +        /* hardware mux values */
2905 +        QPU_MUX_R0,
2906 +        QPU_MUX_R1,
2907 +        QPU_MUX_R2,
2908 +        QPU_MUX_R3,
2909 +        QPU_MUX_R4,
2910 +        QPU_MUX_R5,
2911 +        QPU_MUX_A,
2912 +        QPU_MUX_B,
2913 +
2914 +        /* non-hardware mux values */
2915 +        QPU_MUX_IMM,
2916 +};
2917 +
2918 +enum qpu_cond {
2919 +        QPU_COND_NEVER,
2920 +        QPU_COND_ALWAYS,
2921 +        QPU_COND_ZS,
2922 +        QPU_COND_ZC,
2923 +        QPU_COND_NS,
2924 +        QPU_COND_NC,
2925 +        QPU_COND_CS,
2926 +        QPU_COND_CC,
2927 +};
2928 +
2929 +enum qpu_pack_mul {
2930 +        QPU_PACK_MUL_NOP,
2931 +        QPU_PACK_MUL_8888 = 3, /* replicated to each 8 bits of the 32-bit dst. */
2932 +        QPU_PACK_MUL_8A,
2933 +        QPU_PACK_MUL_8B,
2934 +        QPU_PACK_MUL_8C,
2935 +        QPU_PACK_MUL_8D,
2936 +};
2937 +
2938 +enum qpu_pack_a {
2939 +        QPU_PACK_A_NOP,
2940 +        /* convert to 16 bit float if float input, or to int16. */
2941 +        QPU_PACK_A_16A,
2942 +        QPU_PACK_A_16B,
2943 +        /* replicated to each 8 bits of the 32-bit dst. */
2944 +        QPU_PACK_A_8888,
2945 +        /* Convert to 8-bit unsigned int. */
2946 +        QPU_PACK_A_8A,
2947 +        QPU_PACK_A_8B,
2948 +        QPU_PACK_A_8C,
2949 +        QPU_PACK_A_8D,
2950 +
2951 +        /* Saturating variants of the previous instructions. */
2952 +        QPU_PACK_A_32_SAT, /* int-only */
2953 +        QPU_PACK_A_16A_SAT, /* int or float */
2954 +        QPU_PACK_A_16B_SAT,
2955 +        QPU_PACK_A_8888_SAT,
2956 +        QPU_PACK_A_8A_SAT,
2957 +        QPU_PACK_A_8B_SAT,
2958 +        QPU_PACK_A_8C_SAT,
2959 +        QPU_PACK_A_8D_SAT,
2960 +};
2961 +
2962 +enum qpu_unpack_r4 {
2963 +        QPU_UNPACK_R4_NOP,
2964 +        QPU_UNPACK_R4_F16A_TO_F32,
2965 +        QPU_UNPACK_R4_F16B_TO_F32,
2966 +        QPU_UNPACK_R4_8D_REP,
2967 +        QPU_UNPACK_R4_8A,
2968 +        QPU_UNPACK_R4_8B,
2969 +        QPU_UNPACK_R4_8C,
2970 +        QPU_UNPACK_R4_8D,
2971 +};
2972 +
2973 +#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
2974 +/* Using the GNU statement expression extension */
2975 +#define QPU_SET_FIELD(value, field)                                       \
2976 +        ({                                                                \
2977 +                uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
2978 +                assert((fieldval & ~ field ## _MASK) == 0);               \
2979 +                fieldval & field ## _MASK;                                \
2980 +         })
2981 +
2982 +#define QPU_GET_FIELD(word, field) ((uint32_t)(((word)  & field ## _MASK) >> field ## _SHIFT))
2983 +
2984 +#define QPU_SIG_SHIFT                   60
2985 +#define QPU_SIG_MASK                    QPU_MASK(63, 60)
2986 +
2987 +#define QPU_UNPACK_SHIFT                57
2988 +#define QPU_UNPACK_MASK                 QPU_MASK(59, 57)
2989 +
2990 +/**
2991 + * If set, the pack field means PACK_MUL or R4 packing, instead of normal
2992 + * regfile a packing.
2993 + */
2994 +#define QPU_PM                          ((uint64_t)1 << 56)
2995 +
2996 +#define QPU_PACK_SHIFT                  52
2997 +#define QPU_PACK_MASK                   QPU_MASK(55, 52)
2998 +
2999 +#define QPU_COND_ADD_SHIFT              49
3000 +#define QPU_COND_ADD_MASK               QPU_MASK(51, 49)
3001 +#define QPU_COND_MUL_SHIFT              46
3002 +#define QPU_COND_MUL_MASK               QPU_MASK(48, 46)
3003 +
3004 +#define QPU_SF                          ((uint64_t)1 << 45)
3005 +
3006 +#define QPU_WADDR_ADD_SHIFT             38
3007 +#define QPU_WADDR_ADD_MASK              QPU_MASK(43, 38)
3008 +#define QPU_WADDR_MUL_SHIFT             32
3009 +#define QPU_WADDR_MUL_MASK              QPU_MASK(37, 32)
3010 +
3011 +#define QPU_OP_MUL_SHIFT                29
3012 +#define QPU_OP_MUL_MASK                 QPU_MASK(31, 29)
3013 +
3014 +#define QPU_RADDR_A_SHIFT               18
3015 +#define QPU_RADDR_A_MASK                QPU_MASK(23, 18)
3016 +#define QPU_RADDR_B_SHIFT               12
3017 +#define QPU_RADDR_B_MASK                QPU_MASK(17, 12)
3018 +#define QPU_SMALL_IMM_SHIFT             12
3019 +#define QPU_SMALL_IMM_MASK              QPU_MASK(17, 12)
3020 +
3021 +#define QPU_ADD_A_SHIFT                 9
3022 +#define QPU_ADD_A_MASK                  QPU_MASK(11, 9)
3023 +#define QPU_ADD_B_SHIFT                 6
3024 +#define QPU_ADD_B_MASK                  QPU_MASK(8, 6)
3025 +#define QPU_MUL_A_SHIFT                 3
3026 +#define QPU_MUL_A_MASK                  QPU_MASK(5, 3)
3027 +#define QPU_MUL_B_SHIFT                 0
3028 +#define QPU_MUL_B_MASK                  QPU_MASK(2, 0)
3029 +
3030 +#define QPU_WS                          ((uint64_t)1 << 44)
3031 +
3032 +#define QPU_OP_ADD_SHIFT                24
3033 +#define QPU_OP_ADD_MASK                 QPU_MASK(28, 24)
3034 +
3035 +#endif /* VC4_QPU_DEFINES_H */
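The QPU_MASK/QPU_SET_FIELD/QPU_GET_FIELD helpers above pack and extract bit ranges of the 64-bit QPU instruction word, and the shifted/masked definitions that follow them name each range. A minimal standalone sketch of decoding an instruction with them (it re-declares only the macros and shifts it needs so it builds on its own; the instruction value itself is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Redeclared locally so the example compiles standalone; these mirror the
 * definitions in vc4_qpu_defines.h above.
 */
#define QPU_MASK(high, low) ((((uint64_t)1 << ((high) - (low) + 1)) - 1) << (low))
#define QPU_GET_FIELD(word, field) \
        ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))

#define QPU_SIG_SHIFT           60
#define QPU_SIG_MASK            QPU_MASK(63, 60)
#define QPU_WADDR_ADD_SHIFT     38
#define QPU_WADDR_ADD_MASK      QPU_MASK(43, 38)
#define QPU_OP_ADD_SHIFT        24
#define QPU_OP_ADD_MASK         QPU_MASK(28, 24)

int main(void)
{
        uint64_t inst = 0;      /* made-up instruction word for illustration */

        /* Pack the fields the same way QPU_SET_FIELD would. */
        inst |= (uint64_t)1  << QPU_SIG_SHIFT;        /* QPU_SIG_NONE */
        inst |= (uint64_t)12 << QPU_OP_ADD_SHIFT;     /* QPU_A_ADD */
        inst |= (uint64_t)32 << QPU_WADDR_ADD_SHIFT;  /* QPU_W_ACC0 (r0) */

        printf("sig=%u add_op=%u add_waddr=%u\n",
               QPU_GET_FIELD(inst, QPU_SIG),
               QPU_GET_FIELD(inst, QPU_OP_ADD),
               QPU_GET_FIELD(inst, QPU_WADDR_ADD));
        return 0;
}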
3036 --- /dev/null
3037 +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
3038 @@ -0,0 +1,448 @@
3039 +/*
3040 + * Copyright © 2014-2015 Broadcom
3041 + *
3042 + * Permission is hereby granted, free of charge, to any person obtaining a
3043 + * copy of this software and associated documentation files (the "Software"),
3044 + * to deal in the Software without restriction, including without limitation
3045 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3046 + * and/or sell copies of the Software, and to permit persons to whom the
3047 + * Software is furnished to do so, subject to the following conditions:
3048 + *
3049 + * The above copyright notice and this permission notice (including the next
3050 + * paragraph) shall be included in all copies or substantial portions of the
3051 + * Software.
3052 + *
3053 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3054 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3055 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
3056 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3057 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3058 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
3059 + * IN THE SOFTWARE.
3060 + */
3061 +
3062 +/**
3063 + * DOC: Render command list generation
3064 + *
3065 + * In the VC4 driver, render command list generation is performed by the
3066 + * kernel instead of userspace.  We do this because validating a
3067 + * user-submitted command list is hard to get right and has high CPU overhead,
3068 + * while the number of valid configurations for render command lists is
3069 + * actually fairly low.
3070 + */
3071 +
3072 +#include "uapi/drm/vc4_drm.h"
3073 +#include "vc4_drv.h"
3074 +#include "vc4_packet.h"
3075 +
3076 +struct vc4_rcl_setup {
3077 +       struct drm_gem_cma_object *color_read;
3078 +       struct drm_gem_cma_object *color_ms_write;
3079 +       struct drm_gem_cma_object *zs_read;
3080 +       struct drm_gem_cma_object *zs_write;
3081 +
3082 +       struct drm_gem_cma_object *rcl;
3083 +       u32 next_offset;
3084 +};
3085 +
3086 +static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
3087 +{
3088 +       *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
3089 +       setup->next_offset += 1;
3090 +}
3091 +
3092 +static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
3093 +{
3094 +       *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
3095 +       setup->next_offset += 2;
3096 +}
3097 +
3098 +static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
3099 +{
3100 +       *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
3101 +       setup->next_offset += 4;
3102 +}
3103 +
3104 +
3105 +/*
3106 + * Emits a no-op STORE_TILE_BUFFER_GENERAL.
3107 + *
3108 + * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
3109 + * some sort before another load is triggered.
3110 + */
3111 +static void vc4_store_before_load(struct vc4_rcl_setup *setup)
3112 +{
3113 +       rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3114 +       rcl_u16(setup,
3115 +               VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
3116 +                             VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
3117 +               VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
3118 +               VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
3119 +               VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
3120 +       rcl_u32(setup, 0); /* no address, since we're in None mode */
3121 +}
3122 +
3123 +/*
3124 + * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
3125 + *
3126 + * The tile coordinates packet triggers a pending load if there is one, is
3127 + * used for clipping during rendering, and determines where loads/stores
3128 + * happen relative to their base address.
3129 + */
3130 +static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
3131 +                                uint32_t x, uint32_t y)
3132 +{
3133 +       rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
3134 +       rcl_u8(setup, x);
3135 +       rcl_u8(setup, y);
3136 +}
3137 +
3138 +static void emit_tile(struct vc4_exec_info *exec,
3139 +                     struct vc4_rcl_setup *setup,
3140 +                     uint8_t x, uint8_t y, bool first, bool last)
3141 +{
3142 +       struct drm_vc4_submit_cl *args = exec->args;
3143 +       bool has_bin = args->bin_cl_size != 0;
3144 +
3145 +       /* Note that the load doesn't actually occur until the
3146 +        * tile coords packet is processed, and only one load
3147 +        * may be outstanding at a time.
3148 +        */
3149 +       if (setup->color_read) {
3150 +               rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
3151 +               rcl_u16(setup, args->color_read.bits);
3152 +               rcl_u32(setup,
3153 +                       setup->color_read->paddr + args->color_read.offset);
3154 +       }
3155 +
3156 +       if (setup->zs_read) {
3157 +               if (setup->color_read) {
3158 +                       /* Exec previous load. */
3159 +                       vc4_tile_coordinates(setup, x, y);
3160 +                       vc4_store_before_load(setup);
3161 +               }
3162 +
3163 +               rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
3164 +               rcl_u16(setup, args->zs_read.bits);
3165 +               rcl_u32(setup, setup->zs_read->paddr + args->zs_read.offset);
3166 +       }
3167 +
3168 +       /* Clipping depends on tile coordinates having been
3169 +        * emitted, so we always need one here.
3170 +        */
3171 +       vc4_tile_coordinates(setup, x, y);
3172 +
3173 +       /* Wait for the binner before jumping to the first
3174 +        * tile's lists.
3175 +        */
3176 +       if (first && has_bin)
3177 +               rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);
3178 +
3179 +       if (has_bin) {
3180 +               rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
3181 +               rcl_u32(setup, (exec->tile_bo->paddr +
3182 +                               exec->tile_alloc_offset +
3183 +                               (y * exec->bin_tiles_x + x) * 32));
3184 +       }
3185 +
3186 +       if (setup->zs_write) {
3187 +               rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3188 +               rcl_u16(setup, args->zs_write.bits |
3189 +                       (setup->color_ms_write ?
3190 +                        VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR : 0));
3191 +               rcl_u32(setup,
3192 +                       (setup->zs_write->paddr + args->zs_write.offset) |
3193 +                       ((last && !setup->color_ms_write) ?
3194 +                        VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
3195 +       }
3196 +
3197 +       if (setup->color_ms_write) {
3198 +               if (setup->zs_write) {
3199 +                       /* Reset after previous store */
3200 +                       vc4_tile_coordinates(setup, x, y);
3201 +               }
3202 +
3203 +               if (last)
3204 +                       rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
3205 +               else
3206 +                       rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
3207 +       }
3208 +}
3209 +
3210 +static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
3211 +                            struct vc4_rcl_setup *setup)
3212 +{
3213 +       struct drm_vc4_submit_cl *args = exec->args;
3214 +       bool has_bin = args->bin_cl_size != 0;
3215 +       uint8_t min_x_tile = args->min_x_tile;
3216 +       uint8_t min_y_tile = args->min_y_tile;
3217 +       uint8_t max_x_tile = args->max_x_tile;
3218 +       uint8_t max_y_tile = args->max_y_tile;
3219 +       uint8_t xtiles = max_x_tile - min_x_tile + 1;
3220 +       uint8_t ytiles = max_y_tile - min_y_tile + 1;
3221 +       uint8_t x, y;
3222 +       uint32_t size, loop_body_size;
3223 +
3224 +       size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
3225 +       loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
3226 +
3227 +       if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
3228 +               size += VC4_PACKET_CLEAR_COLORS_SIZE +
3229 +                       VC4_PACKET_TILE_COORDINATES_SIZE +
3230 +                       VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3231 +       }
3232 +
3233 +       if (setup->color_read) {
3234 +               loop_body_size += (VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE);
3235 +       }
3236 +       if (setup->zs_read) {
3237 +               if (setup->color_read) {
3238 +                       loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
3239 +                       loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3240 +               }
3241 +               loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
3242 +       }
3243 +
3244 +       if (has_bin) {
3245 +               size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
3246 +               loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
3247 +       }
3248 +
3249 +       if (setup->zs_write)
3250 +               loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3251 +       if (setup->color_ms_write) {
3252 +               if (setup->zs_write)
3253 +                       loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
3254 +               loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;
3255 +       }
3256 +       size += xtiles * ytiles * loop_body_size;
3257 +
3258 +       setup->rcl = &vc4_bo_create(dev, size)->base;
3259 +       if (!setup->rcl)
3260 +               return -ENOMEM;
3261 +       list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
3262 +                     &exec->unref_list);
3263 +
3264 +       rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
3265 +       rcl_u32(setup,
3266 +               (setup->color_ms_write ?
3267 +                (setup->color_ms_write->paddr +
3268 +                 args->color_ms_write.offset) :
3269 +                0));
3270 +       rcl_u16(setup, args->width);
3271 +       rcl_u16(setup, args->height);
3272 +       rcl_u16(setup, args->color_ms_write.bits);
3273 +
3274 +       /* The tile buffer gets cleared when the previous tile is stored.  If
3275 +        * the clear values changed between frames, then the tile buffer has
3276 +        * stale clear values in it, so we have to do a store in None mode (no
3277 +        * writes) so that we trigger the tile buffer clear.
3278 +        */
3279 +       if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
3280 +               rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
3281 +               rcl_u32(setup, args->clear_color[0]);
3282 +               rcl_u32(setup, args->clear_color[1]);
3283 +               rcl_u32(setup, args->clear_z);
3284 +               rcl_u8(setup, args->clear_s);
3285 +
3286 +               vc4_tile_coordinates(setup, 0, 0);
3287 +
3288 +               rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3289 +               rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
3290 +               rcl_u32(setup, 0); /* no address, since we're in None mode */
3291 +       }
3292 +
3293 +       for (y = min_y_tile; y <= max_y_tile; y++) {
3294 +               for (x = min_x_tile; x <= max_x_tile; x++) {
3295 +                       bool first = (x == min_x_tile && y == min_y_tile);
3296 +                       bool last = (x == max_x_tile && y == max_y_tile);
3297 +                       emit_tile(exec, setup, x, y, first, last);
3298 +               }
3299 +       }
3300 +
3301 +       BUG_ON(setup->next_offset != size);
3302 +       exec->ct1ca = setup->rcl->paddr;
3303 +       exec->ct1ea = setup->rcl->paddr + setup->next_offset;
3304 +
3305 +       return 0;
3306 +}
3307 +
3308 +static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
3309 +                                struct drm_gem_cma_object **obj,
3310 +                                struct drm_vc4_submit_rcl_surface *surf)
3311 +{
3312 +       uint8_t tiling = VC4_GET_FIELD(surf->bits,
3313 +                                      VC4_LOADSTORE_TILE_BUFFER_TILING);
3314 +       uint8_t buffer = VC4_GET_FIELD(surf->bits,
3315 +                                      VC4_LOADSTORE_TILE_BUFFER_BUFFER);
3316 +       uint8_t format = VC4_GET_FIELD(surf->bits,
3317 +                                      VC4_LOADSTORE_TILE_BUFFER_FORMAT);
3318 +       int cpp;
3319 +
3320 +       if (surf->pad != 0) {
3321 +               DRM_ERROR("Padding must be zero\n");
3322 +               return -EINVAL;
3323 +       }
3324 +
3325 +       if (surf->hindex == ~0)
3326 +               return 0;
3327 +
3328 +       if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
3329 +               return -EINVAL;
3330 +
3331 +       if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
3332 +                          VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
3333 +                          VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
3334 +               DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
3335 +                         surf->bits);
3336 +               return -EINVAL;
3337 +       }
3338 +
3339 +       if (tiling > VC4_TILING_FORMAT_LT) {
3340 +               DRM_ERROR("Bad tiling format\n");
3341 +               return -EINVAL;
3342 +       }
3343 +
3344 +       if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
3345 +               if (format != 0) {
3346 +                       DRM_ERROR("No color format should be set for ZS\n");
3347 +                       return -EINVAL;
3348 +               }
3349 +               cpp = 4;
3350 +       } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
3351 +               switch (format) {
3352 +               case VC4_LOADSTORE_TILE_BUFFER_BGR565:
3353 +               case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
3354 +                       cpp = 2;
3355 +                       break;
3356 +               case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
3357 +                       cpp = 4;
3358 +                       break;
3359 +               default:
3360 +                       DRM_ERROR("Bad tile buffer format\n");
3361 +                       return -EINVAL;
3362 +               }
3363 +       } else {
3364 +               DRM_ERROR("Bad load/store buffer %d.\n", buffer);
3365 +               return -EINVAL;
3366 +       }
3367 +
3368 +       if (surf->offset & 0xf) {
3369 +               DRM_ERROR("load/store buffer must be 16-byte aligned.\n");
3370 +               return -EINVAL;
3371 +       }
3372 +
3373 +       if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
3374 +                               exec->args->width, exec->args->height, cpp)) {
3375 +               return -EINVAL;
3376 +       }
3377 +
3378 +       return 0;
3379 +}
3380 +
3381 +static int
3382 +vc4_rcl_ms_surface_setup(struct vc4_exec_info *exec,
3383 +                        struct drm_gem_cma_object **obj,
3384 +                        struct drm_vc4_submit_rcl_surface *surf)
3385 +{
3386 +       uint8_t tiling = VC4_GET_FIELD(surf->bits,
3387 +                                      VC4_RENDER_CONFIG_MEMORY_FORMAT);
3388 +       uint8_t format = VC4_GET_FIELD(surf->bits,
3389 +                                      VC4_RENDER_CONFIG_FORMAT);
3390 +       int cpp;
3391 +
3392 +       if (surf->pad != 0) {
3393 +               DRM_ERROR("Padding must be zero\n");
3394 +               return -EINVAL;
3395 +       }
3396 +
3397 +       if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
3398 +                          VC4_RENDER_CONFIG_FORMAT_MASK)) {
3399 +               DRM_ERROR("Unknown bits in render config: 0x%04x\n",
3400 +                         surf->bits);
3401 +               return -EINVAL;
3402 +       }
3403 +
3404 +       if (surf->hindex == ~0)
3405 +               return 0;
3406 +
3407 +       if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
3408 +               return -EINVAL;
3409 +
3410 +       if (tiling > VC4_TILING_FORMAT_LT) {
3411 +               DRM_ERROR("Bad tiling format\n");
3412 +               return -EINVAL;
3413 +       }
3414 +
3415 +       switch (format) {
3416 +       case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
3417 +       case VC4_RENDER_CONFIG_FORMAT_BGR565:
3418 +               cpp = 2;
3419 +               break;
3420 +       case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
3421 +               cpp = 4;
3422 +               break;
3423 +       default:
3424 +               DRM_ERROR("Bad tile buffer format\n");
3425 +               return -EINVAL;
3426 +       }
3427 +
3428 +       if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
3429 +                               exec->args->width, exec->args->height, cpp)) {
3430 +               return -EINVAL;
3431 +       }
3432 +
3433 +       return 0;
3434 +}
3435 +
3436 +int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
3437 +{
3438 +       struct vc4_rcl_setup setup = {0};
3439 +       struct drm_vc4_submit_cl *args = exec->args;
3440 +       bool has_bin = args->bin_cl_size != 0;
3441 +       int ret;
3442 +
3443 +       if (args->min_x_tile > args->max_x_tile ||
3444 +           args->min_y_tile > args->max_y_tile) {
3445 +               DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
3446 +                         args->min_x_tile, args->min_y_tile,
3447 +                         args->max_x_tile, args->max_y_tile);
3448 +               return -EINVAL;
3449 +       }
3450 +
3451 +       if (has_bin &&
3452 +           (args->max_x_tile > exec->bin_tiles_x ||
3453 +            args->max_y_tile > exec->bin_tiles_y)) {
3454 +               DRM_ERROR("Render tiles (%d,%d) outside of bin config (%d,%d)\n",
3455 +                         args->max_x_tile, args->max_y_tile,
3456 +                         exec->bin_tiles_x, exec->bin_tiles_y);
3457 +               return -EINVAL;
3458 +       }
3459 +
3460 +       ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
3461 +       if (ret)
3462 +               return ret;
3463 +
3464 +       ret = vc4_rcl_ms_surface_setup(exec, &setup.color_ms_write,
3465 +                                      &args->color_ms_write);
3466 +       if (ret)
3467 +               return ret;
3468 +
3469 +       ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
3470 +       if (ret)
3471 +               return ret;
3472 +
3473 +       ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
3474 +       if (ret)
3475 +               return ret;
3476 +
3477 +       /* We shouldn't even have the job submitted to us if there's no
3478 +        * surface to write out.
3479 +        */
3480 +       if (!setup.color_ms_write && !setup.zs_write) {
3481 +               DRM_ERROR("RCL requires color or Z/S write\n");
3482 +               return -EINVAL;
3483 +       }
3484 +
3485 +       return vc4_create_rcl_bo(dev, exec, &setup);
3486 +}
3487 --- /dev/null
3488 +++ b/drivers/gpu/drm/vc4/vc4_trace.h
3489 @@ -0,0 +1,63 @@
3490 +/*
3491 + * Copyright (C) 2015 Broadcom
3492 + *
3493 + * This program is free software; you can redistribute it and/or modify
3494 + * it under the terms of the GNU General Public License version 2 as
3495 + * published by the Free Software Foundation.
3496 + */
3497 +
3498 +#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
3499 +#define _VC4_TRACE_H_
3500 +
3501 +#include <linux/stringify.h>
3502 +#include <linux/types.h>
3503 +#include <linux/tracepoint.h>
3504 +
3505 +#undef TRACE_SYSTEM
3506 +#define TRACE_SYSTEM vc4
3507 +#define TRACE_INCLUDE_FILE vc4_trace
3508 +
3509 +TRACE_EVENT(vc4_wait_for_seqno_begin,
3510 +           TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
3511 +           TP_ARGS(dev, seqno, timeout),
3512 +
3513 +           TP_STRUCT__entry(
3514 +                            __field(u32, dev)
3515 +                            __field(u64, seqno)
3516 +                            __field(u64, timeout)
3517 +                            ),
3518 +
3519 +           TP_fast_assign(
3520 +                          __entry->dev = dev->primary->index;
3521 +                          __entry->seqno = seqno;
3522 +                          __entry->timeout = timeout;
3523 +                          ),
3524 +
3525 +           TP_printk("dev=%u, seqno=%llu, timeout=%llu",
3526 +                     __entry->dev, __entry->seqno, __entry->timeout)
3527 +);
3528 +
3529 +TRACE_EVENT(vc4_wait_for_seqno_end,
3530 +           TP_PROTO(struct drm_device *dev, uint64_t seqno),
3531 +           TP_ARGS(dev, seqno),
3532 +
3533 +           TP_STRUCT__entry(
3534 +                            __field(u32, dev)
3535 +                            __field(u64, seqno)
3536 +                            ),
3537 +
3538 +           TP_fast_assign(
3539 +                          __entry->dev = dev->primary->index;
3540 +                          __entry->seqno = seqno;
3541 +                          ),
3542 +
3543 +           TP_printk("dev=%u, seqno=%llu",
3544 +                     __entry->dev, __entry->seqno)
3545 +);
3546 +
3547 +#endif /* _VC4_TRACE_H_ */
3548 +
3549 +/* This part must be outside protection */
3550 +#undef TRACE_INCLUDE_PATH
3551 +#define TRACE_INCLUDE_PATH .
3552 +#include <trace/define_trace.h>
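Each TRACE_EVENT() above generates a trace_<name>() call for the rest of the driver to invoke; vc4_trace_points.c (next) only exists to expand those definitions once with CREATE_TRACE_POINTS. A hedged sketch of the expected call pattern around a seqno wait — the surrounding function and variable names are hypothetical, only the trace_vc4_wait_for_seqno_begin()/_end() calls follow from the definitions above:

/* Illustrative only: the function and local names are made up, but the
 * trace_vc4_wait_for_seqno_* calls are what TRACE_EVENT() expands to.
 */
static int example_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
                                  uint64_t timeout_ns)
{
        int ret = 0;

        trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);

        /* ... block until the V3D interrupt handler advances the finished
         * seqno past the one we are waiting for, or until the timeout ...
         */

        trace_vc4_wait_for_seqno_end(dev, seqno);
        return ret;
}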
3553 --- /dev/null
3554 +++ b/drivers/gpu/drm/vc4/vc4_trace_points.c
3555 @@ -0,0 +1,14 @@
3556 +/*
3557 + * Copyright (C) 2015 Broadcom
3558 + *
3559 + * This program is free software; you can redistribute it and/or modify
3560 + * it under the terms of the GNU General Public License version 2 as
3561 + * published by the Free Software Foundation.
3562 + */
3563 +
3564 +#include "vc4_drv.h"
3565 +
3566 +#ifndef __CHECKER__
3567 +#define CREATE_TRACE_POINTS
3568 +#include "vc4_trace.h"
3569 +#endif
3570 --- /dev/null
3571 +++ b/drivers/gpu/drm/vc4/vc4_v3d.c
3572 @@ -0,0 +1,268 @@
3573 +/*
3574 + * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3575 + * Copyright (C) 2013 Red Hat
3576 + * Author: Rob Clark <robdclark@gmail.com>
3577 + *
3578 + * This program is free software; you can redistribute it and/or modify it
3579 + * under the terms of the GNU General Public License version 2 as published by
3580 + * the Free Software Foundation.
3581 + *
3582 + * This program is distributed in the hope that it will be useful, but WITHOUT
3583 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3584 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
3585 + * more details.
3586 + *
3587 + * You should have received a copy of the GNU General Public License along with
3588 + * this program.  If not, see <http://www.gnu.org/licenses/>.
3589 + */
3590 +
3591 +#include "linux/component.h"
3592 +#include "soc/bcm2835/raspberrypi-firmware.h"
3593 +#include "vc4_drv.h"
3594 +#include "vc4_regs.h"
3595 +
3596 +#ifdef CONFIG_DEBUG_FS
3597 +#define REGDEF(reg) { reg, #reg }
3598 +static const struct {
3599 +       uint32_t reg;
3600 +       const char *name;
3601 +} vc4_reg_defs[] = {
3602 +       REGDEF(V3D_IDENT0),
3603 +       REGDEF(V3D_IDENT1),
3604 +       REGDEF(V3D_IDENT2),
3605 +       REGDEF(V3D_SCRATCH),
3606 +       REGDEF(V3D_L2CACTL),
3607 +       REGDEF(V3D_SLCACTL),
3608 +       REGDEF(V3D_INTCTL),
3609 +       REGDEF(V3D_INTENA),
3610 +       REGDEF(V3D_INTDIS),
3611 +       REGDEF(V3D_CT0CS),
3612 +       REGDEF(V3D_CT1CS),
3613 +       REGDEF(V3D_CT0EA),
3614 +       REGDEF(V3D_CT1EA),
3615 +       REGDEF(V3D_CT0CA),
3616 +       REGDEF(V3D_CT1CA),
3617 +       REGDEF(V3D_CT00RA0),
3618 +       REGDEF(V3D_CT01RA0),
3619 +       REGDEF(V3D_CT0LC),
3620 +       REGDEF(V3D_CT1LC),
3621 +       REGDEF(V3D_CT0PC),
3622 +       REGDEF(V3D_CT1PC),
3623 +       REGDEF(V3D_PCS),
3624 +       REGDEF(V3D_BFC),
3625 +       REGDEF(V3D_RFC),
3626 +       REGDEF(V3D_BPCA),
3627 +       REGDEF(V3D_BPCS),
3628 +       REGDEF(V3D_BPOA),
3629 +       REGDEF(V3D_BPOS),
3630 +       REGDEF(V3D_BXCF),
3631 +       REGDEF(V3D_SQRSV0),
3632 +       REGDEF(V3D_SQRSV1),
3633 +       REGDEF(V3D_SQCNTL),
3634 +       REGDEF(V3D_SRQPC),
3635 +       REGDEF(V3D_SRQUA),
3636 +       REGDEF(V3D_SRQUL),
3637 +       REGDEF(V3D_SRQCS),
3638 +       REGDEF(V3D_VPACNTL),
3639 +       REGDEF(V3D_VPMBASE),
3640 +       REGDEF(V3D_PCTRC),
3641 +       REGDEF(V3D_PCTRE),
3642 +       REGDEF(V3D_PCTR0),
3643 +       REGDEF(V3D_PCTRS0),
3644 +       REGDEF(V3D_PCTR1),
3645 +       REGDEF(V3D_PCTRS1),
3646 +       REGDEF(V3D_PCTR2),
3647 +       REGDEF(V3D_PCTRS2),
3648 +       REGDEF(V3D_PCTR3),
3649 +       REGDEF(V3D_PCTRS3),
3650 +       REGDEF(V3D_PCTR4),
3651 +       REGDEF(V3D_PCTRS4),
3652 +       REGDEF(V3D_PCTR5),
3653 +       REGDEF(V3D_PCTRS5),
3654 +       REGDEF(V3D_PCTR6),
3655 +       REGDEF(V3D_PCTRS6),
3656 +       REGDEF(V3D_PCTR7),
3657 +       REGDEF(V3D_PCTRS7),
3658 +       REGDEF(V3D_PCTR8),
3659 +       REGDEF(V3D_PCTRS8),
3660 +       REGDEF(V3D_PCTR9),
3661 +       REGDEF(V3D_PCTRS9),
3662 +       REGDEF(V3D_PCTR10),
3663 +       REGDEF(V3D_PCTRS10),
3664 +       REGDEF(V3D_PCTR11),
3665 +       REGDEF(V3D_PCTRS11),
3666 +       REGDEF(V3D_PCTR12),
3667 +       REGDEF(V3D_PCTRS12),
3668 +       REGDEF(V3D_PCTR13),
3669 +       REGDEF(V3D_PCTRS13),
3670 +       REGDEF(V3D_PCTR14),
3671 +       REGDEF(V3D_PCTRS14),
3672 +       REGDEF(V3D_PCTR15),
3673 +       REGDEF(V3D_PCTRS15),
3674 +       REGDEF(V3D_BGE),
3675 +       REGDEF(V3D_FDBGO),
3676 +       REGDEF(V3D_FDBGB),
3677 +       REGDEF(V3D_FDBGR),
3678 +       REGDEF(V3D_FDBGS),
3679 +       REGDEF(V3D_ERRSTAT),
3680 +};
3681 +
3682 +int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
3683 +{
3684 +       struct drm_info_node *node = (struct drm_info_node *) m->private;
3685 +       struct drm_device *dev = node->minor->dev;
3686 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
3687 +       int i;
3688 +
3689 +       for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
3690 +               seq_printf(m, "%s (0x%04x): 0x%08x\n",
3691 +                          vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
3692 +                          V3D_READ(vc4_reg_defs[i].reg));
3693 +       }
3694 +
3695 +       return 0;
3696 +}
3697 +
3698 +int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
3699 +{
3700 +       struct drm_info_node *node = (struct drm_info_node *) m->private;
3701 +       struct drm_device *dev = node->minor->dev;
3702 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
3703 +       uint32_t ident1 = V3D_READ(V3D_IDENT1);
3704 +       uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
3705 +       uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
3706 +       uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
3707 +
3708 +       seq_printf(m, "Revision:   %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
3709 +       seq_printf(m, "Slices:     %d\n", nslc);
3710 +       seq_printf(m, "TMUs:       %d\n", nslc * tups);
3711 +       seq_printf(m, "QPUs:       %d\n", nslc * qups);
3712 +       seq_printf(m, "Semaphores: %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
3713 +
3714 +       return 0;
3715 +}
3716 +#endif /* CONFIG_DEBUG_FS */
3717 +
3718 +/*
3719 + * Asks the firmware to turn on power to the V3D engine.
3720 + *
3721 + * This may be doable with just the clocks interface, though this
3722 + * packet does some other register setup from the firmware, too.
3723 + */
3724 +int
3725 +vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
3726 +{
3727 +       u32 packet = on;
3728 +
3729 +       return rpi_firmware_property(vc4->firmware,
3730 +                                    RPI_FIRMWARE_SET_ENABLE_QPU,
3731 +                                    &packet, sizeof(packet));
3732 +}
3733 +
3734 +static void vc4_v3d_init_hw(struct drm_device *dev)
3735 +{
3736 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
3737 +
3738 +       /* Take all the memory that would have been reserved for user
3739 +        * QPU programs, since we don't have an interface for running
3740 +        * them, anyway.
3741 +        */
3742 +       V3D_WRITE(V3D_VPMBASE, 0);
3743 +}
3744 +
3745 +static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
3746 +{
3747 +       struct platform_device *pdev = to_platform_device(dev);
3748 +       struct drm_device *drm = dev_get_drvdata(master);
3749 +       struct vc4_dev *vc4 = to_vc4_dev(drm);
3750 +       struct vc4_v3d *v3d = NULL;
3751 +       int ret;
3752 +
3753 +       v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
3754 +       if (!v3d)
3755 +               return -ENOMEM;
3756 +
3757 +       v3d->pdev = pdev;
3758 +
3759 +       v3d->regs = vc4_ioremap_regs(pdev, 0);
3760 +       if (IS_ERR(v3d->regs))
3761 +               return PTR_ERR(v3d->regs);
3762 +
3763 +       vc4->v3d = v3d;
3764 +
3765 +       ret = vc4_v3d_set_power(vc4, true);
3766 +       if (ret)
3767 +               return ret;
3768 +
3769 +       if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
3770 +               DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
3771 +                         V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
3772 +               return -EINVAL;
3773 +       }
3774 +
3775 +       /* Reset the binner overflow address/size at setup, to be sure
3776 +        * we don't reuse an old one.
3777 +        */
3778 +       V3D_WRITE(V3D_BPOA, 0);
3779 +       V3D_WRITE(V3D_BPOS, 0);
3780 +
3781 +       vc4_v3d_init_hw(drm);
3782 +
3783 +       ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
3784 +       if (ret) {
3785 +               DRM_ERROR("Failed to install IRQ handler\n");
3786 +               return ret;
3787 +       }
3788 +
3789 +       return 0;
3790 +}
3791 +
3792 +static void vc4_v3d_unbind(struct device *dev, struct device *master,
3793 +                           void *data)
3794 +{
3795 +       struct drm_device *drm = dev_get_drvdata(master);
3796 +       struct vc4_dev *vc4 = to_vc4_dev(drm);
3797 +
3798 +       drm_irq_uninstall(drm);
3799 +
3800 +       /* Disable the binner's overflow memory address, so the next
3801 +        * driver probe (if any) doesn't try to reuse our old
3802 +        * allocation.
3803 +        */
3804 +       V3D_WRITE(V3D_BPOA, 0);
3805 +       V3D_WRITE(V3D_BPOS, 0);
3806 +
3807 +       vc4_v3d_set_power(vc4, false);
3808 +
3809 +       vc4->v3d = NULL;
3810 +}
3811 +
3812 +static const struct component_ops vc4_v3d_ops = {
3813 +       .bind   = vc4_v3d_bind,
3814 +       .unbind = vc4_v3d_unbind,
3815 +};
3816 +
3817 +static int vc4_v3d_dev_probe(struct platform_device *pdev)
3818 +{
3819 +       return component_add(&pdev->dev, &vc4_v3d_ops);
3820 +}
3821 +
3822 +static int vc4_v3d_dev_remove(struct platform_device *pdev)
3823 +{
3824 +       component_del(&pdev->dev, &vc4_v3d_ops);
3825 +       return 0;
3826 +}
3827 +
3828 +static const struct of_device_id vc4_v3d_dt_match[] = {
3829 +       { .compatible = "brcm,vc4-v3d" },
3830 +       {}
3831 +};
3832 +
3833 +struct platform_driver vc4_v3d_driver = {
3834 +       .probe = vc4_v3d_dev_probe,
3835 +       .remove = vc4_v3d_dev_remove,
3836 +       .driver = {
3837 +               .name = "vc4_v3d",
3838 +               .of_match_table = vc4_v3d_dt_match,
3839 +       },
3840 +};
3841 --- /dev/null
3842 +++ b/drivers/gpu/drm/vc4/vc4_validate.c
3843 @@ -0,0 +1,958 @@
3844 +/*
3845 + * Copyright © 2014 Broadcom
3846 + *
3847 + * Permission is hereby granted, free of charge, to any person obtaining a
3848 + * copy of this software and associated documentation files (the "Software"),
3849 + * to deal in the Software without restriction, including without limitation
3850 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3851 + * and/or sell copies of the Software, and to permit persons to whom the
3852 + * Software is furnished to do so, subject to the following conditions:
3853 + *
3854 + * The above copyright notice and this permission notice (including the next
3855 + * paragraph) shall be included in all copies or substantial portions of the
3856 + * Software.
3857 + *
3858 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3859 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3860 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
3861 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3862 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3863 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
3864 + * IN THE SOFTWARE.
3865 + */
3866 +
3867 +/**
3868 + * Command list validator for VC4.
3869 + *
3870 + * The VC4 has no IOMMU between it and system memory.  So, a user with
3871 + * access to execute command lists could escalate privilege by
3872 + * overwriting system memory (drawing to it as a framebuffer) or
3873 + * reading system memory it shouldn't (reading it as a texture, or
3874 + * uniform data, or vertex data).
3875 + *
3876 + * This validates command lists to ensure that all accesses are within
3877 + * the bounds of the GEM objects referenced.  It explicitly whitelists
3878 + * packets, and looks at the offsets in any address fields to make
3879 + * sure they're constrained within the BOs they reference.
3880 + *
3881 + * Note that because of the validation that's happening anyway, this
3882 + * is where GEM relocation processing happens.
3883 + */
3884 +
3885 +#include "uapi/drm/vc4_drm.h"
3886 +#include "vc4_drv.h"
3887 +#include "vc4_packet.h"
3888 +
3889 +#define VALIDATE_ARGS \
3890 +       struct vc4_exec_info *exec,                     \
3891 +       void *validated,                                \
3892 +       void *untrusted
3893 +
3894 +
3895 +/** Return the width in pixels of a 64-byte microtile. */
3896 +static uint32_t
3897 +utile_width(int cpp)
3898 +{
3899 +       switch (cpp) {
3900 +       case 1:
3901 +       case 2:
3902 +               return 8;
3903 +       case 4:
3904 +               return 4;
3905 +       case 8:
3906 +               return 2;
3907 +       default:
3908 +               DRM_ERROR("unknown cpp: %d\n", cpp);
3909 +               return 1;
3910 +       }
3911 +}
3912 +
3913 +/** Return the height in pixels of a 64-byte microtile. */
3914 +static uint32_t
3915 +utile_height(int cpp)
3916 +{
3917 +       switch (cpp) {
3918 +       case 1:
3919 +               return 8;
3920 +       case 2:
3921 +       case 4:
3922 +       case 8:
3923 +               return 4;
3924 +       default:
3925 +               DRM_ERROR("unknown cpp: %d\n", cpp);
3926 +               return 1;
3927 +       }
3928 +}
3929 +
3930 +/**
3931 + * The texture unit decides what tiling format a particular miplevel uses
3932 + * based on this same size check, so we lay out our miptrees accordingly.
3933 + */
3934 +static bool
3935 +size_is_lt(uint32_t width, uint32_t height, int cpp)
3936 +{
3937 +       return (width <= 4 * utile_width(cpp) ||
3938 +               height <= 4 * utile_height(cpp));
3939 +}
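A worked example of the helpers above, written as standalone arithmetic that mirrors utile_width(), utile_height() and size_is_lt() (the cpp and level sizes are just illustrations): at cpp = 4 a 64-byte microtile is 4x4 pixels, so a miplevel drops to the LT layout once it is at most 16 pixels wide or 16 pixels tall.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors utile_width()/utile_height() above: a microtile is always 64 bytes. */
static uint32_t ex_utile_width(int cpp)  { return cpp <= 2 ? 8 : (cpp == 4 ? 4 : 2); }
static uint32_t ex_utile_height(int cpp) { return cpp == 1 ? 8 : 4; }

/* Mirrors size_is_lt() above. */
static bool ex_size_is_lt(uint32_t w, uint32_t h, int cpp)
{
        return w <= 4 * ex_utile_width(cpp) || h <= 4 * ex_utile_height(cpp);
}

int main(void)
{
        int cpp = 4; /* e.g. RGBA8888 */

        printf("utile: %ux%u pixels at %d bytes/pixel\n",
               ex_utile_width(cpp), ex_utile_height(cpp), cpp);
        printf("64x64 level -> %s tiling\n",
               ex_size_is_lt(64, 64, cpp) ? "LT" : "T");
        printf("16x64 level -> %s tiling\n",
               ex_size_is_lt(16, 64, cpp) ? "LT" : "T");
        return 0;
}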
3940 +
3941 +bool
3942 +vc4_use_bo(struct vc4_exec_info *exec,
3943 +          uint32_t hindex,
3944 +          enum vc4_bo_mode mode,
3945 +          struct drm_gem_cma_object **obj)
3946 +{
3947 +       *obj = NULL;
3948 +
3949 +       if (hindex >= exec->bo_count) {
3950 +               DRM_ERROR("BO index %d greater than BO count %d\n",
3951 +                         hindex, exec->bo_count);
3952 +               return false;
3953 +       }
3954 +
3955 +       if (exec->bo[hindex].mode != mode) {
3956 +               if (exec->bo[hindex].mode == VC4_MODE_UNDECIDED) {
3957 +                       exec->bo[hindex].mode = mode;
3958 +               } else {
3959 +                       DRM_ERROR("BO index %d reused with mode %d vs %d\n",
3960 +                                 hindex, exec->bo[hindex].mode, mode);
3961 +                       return false;
3962 +               }
3963 +       }
3964 +
3965 +       *obj = exec->bo[hindex].bo;
3966 +       return true;
3967 +}
3968 +
3969 +static bool
3970 +vc4_use_handle(struct vc4_exec_info *exec,
3971 +              uint32_t gem_handles_packet_index,
3972 +              enum vc4_bo_mode mode,
3973 +              struct drm_gem_cma_object **obj)
3974 +{
3975 +       return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index],
3976 +                         mode, obj);
3977 +}
3978 +
3979 +static uint32_t
3980 +gl_shader_rec_size(uint32_t pointer_bits)
3981 +{
3982 +       uint32_t attribute_count = pointer_bits & 7;
3983 +       bool extended = pointer_bits & 8;
3984 +
3985 +       if (attribute_count == 0)
3986 +               attribute_count = 8;
3987 +
3988 +       if (extended)
3989 +               return 100 + attribute_count * 4;
3990 +       else
3991 +               return 36 + attribute_count * 8;
3992 +}
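To make the encoding concrete: the low three bits of the shader state word give the attribute count (0 meaning 8) and bit 3 selects the extended record format; gl_shader_rec_size() converts that into the number of shader record bytes that validate_gl_shader_state() will consume. A standalone sketch of the same arithmetic (the input values are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors gl_shader_rec_size() above. */
static uint32_t ex_gl_shader_rec_size(uint32_t pointer_bits)
{
        uint32_t attribute_count = pointer_bits & 7;
        bool extended = pointer_bits & 8;

        if (attribute_count == 0)
                attribute_count = 8;

        return extended ? 100 + attribute_count * 4
                        : 36 + attribute_count * 8;
}

int main(void)
{
        /* 3 attributes, not extended: 36 + 3 * 8 = 60 bytes. */
        printf("rec size = %u\n", ex_gl_shader_rec_size(0x3));
        /* 0 means "8 attributes", extended: 100 + 8 * 4 = 132 bytes. */
        printf("rec size = %u\n", ex_gl_shader_rec_size(0x8));
        return 0;
}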
3993 +
3994 +bool
3995 +vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
3996 +                  uint32_t offset, uint8_t tiling_format,
3997 +                  uint32_t width, uint32_t height, uint8_t cpp)
3998 +{
3999 +       uint32_t aligned_width, aligned_height, stride, size;
4000 +       uint32_t utile_w = utile_width(cpp);
4001 +       uint32_t utile_h = utile_height(cpp);
4002 +
4003 +       /* The shaded vertex format stores signed 12.4 fixed point
4004 +        * (-2048,2047) offsets from the viewport center, so we should
4005 +        * never have a render target larger than 4096.  The texture
4006 +        * unit can only sample from 2048x2048, so it's even more
4007 +        * restricted.  This lets us avoid worrying about overflow in
4008 +        * our math.
4009 +        */
4010 +       if (width > 4096 || height > 4096) {
4011 +               DRM_ERROR("Surface dimensions (%d,%d) too large\n", width, height);
4012 +               return false;
4013 +       }
4014 +
4015 +       switch (tiling_format) {
4016 +       case VC4_TILING_FORMAT_LINEAR:
4017 +               aligned_width = round_up(width, utile_w);
4018 +               aligned_height = height;
4019 +               break;
4020 +       case VC4_TILING_FORMAT_T:
4021 +               aligned_width = round_up(width, utile_w * 8);
4022 +               aligned_height = round_up(height, utile_h * 8);
4023 +               break;
4024 +       case VC4_TILING_FORMAT_LT:
4025 +               aligned_width = round_up(width, utile_w);
4026 +               aligned_height = round_up(height, utile_h);
4027 +               break;
4028 +       default:
4029 +               DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
4030 +               return false;
4031 +       }
4032 +
4033 +       stride = aligned_width * cpp;
4034 +       size = stride * aligned_height;
4035 +
4036 +       if (size + offset < size ||
4037 +           size + offset > fbo->base.size) {
4038 +               DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %d)\n",
4039 +                         width, height,
4040 +                         aligned_width, aligned_height,
4041 +                         size, offset, fbo->base.size);
4042 +               return false;
4043 +       }
4044 +
4045 +       return true;
4046 +}
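As a concrete illustration of the alignment rules above: with cpp = 4 a microtile is 4x4 pixels, so T format rounds each dimension up to 32 pixels while LT rounds to 4. The standalone sketch below redoes that arithmetic for a hypothetical 1000x600 RGBA8888 surface; vc4_check_tex_size() then only has to verify that offset plus the computed size still fits inside the BO.

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_u32(uint32_t x, uint32_t align)
{
        return (x + align - 1) / align * align;
}

int main(void)
{
        /* Hypothetical RGBA8888 surface; the utile is 4x4 pixels at cpp = 4. */
        uint32_t width = 1000, height = 600, cpp = 4;
        uint32_t utile_w = 4, utile_h = 4;

        /* T format: round each dimension to 8x8 utiles (32x32 pixels here). */
        uint32_t t_w = round_up_u32(width, utile_w * 8);
        uint32_t t_h = round_up_u32(height, utile_h * 8);

        /* LT format: round each dimension to a single utile. */
        uint32_t lt_w = round_up_u32(width, utile_w);
        uint32_t lt_h = round_up_u32(height, utile_h);

        printf("T : %ux%u -> %u bytes\n", t_w, t_h, t_w * cpp * t_h);
        printf("LT: %ux%u -> %u bytes\n", lt_w, lt_h, lt_w * cpp * lt_h);
        return 0;
}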
4047 +
4048 +static int
4049 +validate_flush_all(VALIDATE_ARGS)
4050 +{
4051 +       if (exec->found_increment_semaphore_packet) {
4052 +               DRM_ERROR("VC4_PACKET_FLUSH_ALL after "
4053 +                         "VC4_PACKET_INCREMENT_SEMAPHORE\n");
4054 +               return -EINVAL;
4055 +       }
4056 +
4057 +       return 0;
4058 +}
4059 +
4060 +static int
4061 +validate_start_tile_binning(VALIDATE_ARGS)
4062 +{
4063 +       if (exec->found_start_tile_binning_packet) {
4064 +               DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
4065 +               return -EINVAL;
4066 +       }
4067 +       exec->found_start_tile_binning_packet = true;
4068 +
4069 +       if (!exec->found_tile_binning_mode_config_packet) {
4070 +               DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
4071 +               return -EINVAL;
4072 +       }
4073 +
4074 +       return 0;
4075 +}
4076 +
4077 +static int
4078 +validate_increment_semaphore(VALIDATE_ARGS)
4079 +{
4080 +       if (exec->found_increment_semaphore_packet) {
4081 +               DRM_ERROR("Duplicate VC4_PACKET_INCREMENT_SEMAPHORE\n");
4082 +               return -EINVAL;
4083 +       }
4084 +       exec->found_increment_semaphore_packet = true;
4085 +
4086 +       /* Once we've found the semaphore increment, there should be one FLUSH
4087 +        * then the end of the command list.  The FLUSH actually triggers the
4088 +        * increment, so we only need to make sure nothing but that FLUSH follows.
4089 +        */
4090 +
4091 +       return 0;
4092 +}
4093 +
4094 +static int
4095 +validate_indexed_prim_list(VALIDATE_ARGS)
4096 +{
4097 +       struct drm_gem_cma_object *ib;
4098 +       uint32_t length = *(uint32_t *)(untrusted + 1);
4099 +       uint32_t offset = *(uint32_t *)(untrusted + 5);
4100 +       uint32_t max_index = *(uint32_t *)(untrusted + 9);
4101 +       uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
4102 +       struct vc4_shader_state *shader_state;
4103 +
4104 +       if (exec->found_increment_semaphore_packet) {
4105 +               DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
4106 +               return -EINVAL;
4107 +       }
4108 +
4109 +       /* Check overflow condition */
4110 +       if (exec->shader_state_count == 0) {
4111 +               DRM_ERROR("shader state must precede primitives\n");
4112 +               return -EINVAL;
4113 +       }
4114 +       shader_state = &exec->shader_state[exec->shader_state_count - 1];
4115 +
4116 +       if (max_index > shader_state->max_index)
4117 +               shader_state->max_index = max_index;
4118 +
4119 +       if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &ib))
4120 +               return -EINVAL;
4121 +
4122 +       if (offset > ib->base.size ||
4123 +           (ib->base.size - offset) / index_size < length) {
4124 +               DRM_ERROR("IB access overflow (%d + %d*%d > %d)\n",
4125 +                         offset, length, index_size, ib->base.size);
4126 +               return -EINVAL;
4127 +       }
4128 +
4129 +       *(uint32_t *)(validated + 5) = ib->paddr + offset;
4130 +
4131 +       return 0;
4132 +}
4133 +
4134 +static int
4135 +validate_gl_array_primitive(VALIDATE_ARGS)
4136 +{
4137 +       uint32_t length = *(uint32_t *)(untrusted + 1);
4138 +       uint32_t base_index = *(uint32_t *)(untrusted + 5);
4139 +       uint32_t max_index;
4140 +       struct vc4_shader_state *shader_state;
4141 +
4142 +       if (exec->found_increment_semaphore_packet) {
4143 +               DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
4144 +               return -EINVAL;
4145 +       }
4146 +
4147 +       /* Check overflow condition */
4148 +       if (exec->shader_state_count == 0) {
4149 +               DRM_ERROR("shader state must precede primitives\n");
4150 +               return -EINVAL;
4151 +       }
4152 +       shader_state = &exec->shader_state[exec->shader_state_count - 1];
4153 +
4154 +       if (length + base_index < length) {
4155 +               DRM_ERROR("primitive vertex count overflow\n");
4156 +               return -EINVAL;
4157 +       }
4158 +       max_index = length + base_index - 1;
4159 +
4160 +       if (max_index > shader_state->max_index)
4161 +               shader_state->max_index = max_index;
4162 +
4163 +       return 0;
4164 +}
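The `length + base_index < length` test above is the usual unsigned wraparound guard: if the 32-bit sum overflowed, it ends up smaller than either operand, so max_index would otherwise be derived from a wrapped value. A tiny standalone illustration (the numbers are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t length = 0x10, base_index = 0xfffffff8;
        uint32_t sum = length + base_index;   /* wraps around to 0x8 */

        if (sum < length)
                printf("overflow detected: 0x%x + 0x%x wrapped to 0x%x\n",
                       length, base_index, sum);
        return 0;
}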
4165 +
4166 +static int
4167 +validate_gl_shader_state(VALIDATE_ARGS)
4168 +{
4169 +       uint32_t i = exec->shader_state_count++;
4170 +
4171 +       if (i >= exec->shader_state_size) {
4172 +               DRM_ERROR("More requests for shader states than declared\n");
4173 +               return -EINVAL;
4174 +       }
4175 +
4176 +       exec->shader_state[i].packet = VC4_PACKET_GL_SHADER_STATE;
4177 +       exec->shader_state[i].addr = *(uint32_t *)untrusted;
4178 +       exec->shader_state[i].max_index = 0;
4179 +
4180 +       if (exec->shader_state[i].addr & ~0xf) {
4181 +               DRM_ERROR("high bits set in GL shader rec reference\n");
4182 +               return -EINVAL;
4183 +       }
4184 +
4185 +       *(uint32_t *)validated = (exec->shader_rec_p +
4186 +                                 exec->shader_state[i].addr);
4187 +
4188 +       exec->shader_rec_p +=
4189 +               roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
4190 +
4191 +       return 0;
4192 +}
4193 +
4194 +static int
4195 +validate_nv_shader_state(VALIDATE_ARGS)
4196 +{
4197 +       uint32_t i = exec->shader_state_count++;
4198 +
4199 +       if (i >= exec->shader_state_size) {
4200 +               DRM_ERROR("More requests for shader states than declared\n");
4201 +               return -EINVAL;
4202 +       }
4203 +
4204 +       exec->shader_state[i].packet = VC4_PACKET_NV_SHADER_STATE;
4205 +       exec->shader_state[i].addr = *(uint32_t *)untrusted;
4206 +
4207 +       if (exec->shader_state[i].addr & 15) {
4208 +               DRM_ERROR("NV shader state address 0x%08x misaligned\n",
4209 +                         exec->shader_state[i].addr);
4210 +               return -EINVAL;
4211 +       }
4212 +
4213 +       *(uint32_t *)validated = (exec->shader_state[i].addr +
4214 +                                 exec->shader_rec_p);
4215 +
4216 +       return 0;
4217 +}
4218 +
4219 +static int
4220 +validate_tile_binning_config(VALIDATE_ARGS)
4221 +{
4222 +       struct drm_device *dev = exec->exec_bo->base.dev;
4223 +       uint8_t flags;
4224 +       uint32_t tile_state_size, tile_alloc_size;
4225 +       uint32_t tile_count;
4226 +
4227 +       if (exec->found_tile_binning_mode_config_packet) {
4228 +               DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
4229 +               return -EINVAL;
4230 +       }
4231 +       exec->found_tile_binning_mode_config_packet = true;
4232 +
4233 +       exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
4234 +       exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
4235 +       tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
4236 +       flags = *(uint8_t *)(untrusted + 14);
4237 +
4238 +       if (exec->bin_tiles_x == 0 ||
4239 +           exec->bin_tiles_y == 0) {
4240 +               DRM_ERROR("Tile binning config of %dx%d too small\n",
4241 +                         exec->bin_tiles_x, exec->bin_tiles_y);
4242 +               return -EINVAL;
4243 +       }
4244 +
4245 +       if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
4246 +                    VC4_BIN_CONFIG_TILE_BUFFER_64BIT |
4247 +                    VC4_BIN_CONFIG_MS_MODE_4X)) {
4248 +               DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
4249 +               return -EINVAL;
4250 +       }
4251 +
4252 +       /* The tile state data array is 48 bytes per tile, and we put it at
4253 +        * the start of a BO containing both it and the tile alloc.
4254 +        */
4255 +       tile_state_size = 48 * tile_count;
4256 +
4257 +       /* Since the tile alloc array will follow us, align. */
4258 +       exec->tile_alloc_offset = roundup(tile_state_size, 4096);
4259 +
4260 +       *(uint8_t *)(validated + 14) =
4261 +               ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
4262 +                           VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
4263 +                VC4_BIN_CONFIG_AUTO_INIT_TSDA |
4264 +                VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
4265 +                              VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
4266 +                VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
4267 +                              VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
4268 +
4269 +       /* Initial block size. */
4270 +       tile_alloc_size = 32 * tile_count;
4271 +
4272 +       /*
4273 +        * The initial allocation gets rounded to the next 256 bytes before
4274 +        * the hardware starts fulfilling further allocations.
4275 +        */
4276 +       tile_alloc_size = roundup(tile_alloc_size, 256);
4277 +
4278 +       /* Add space for the extra allocations.  This is what gets used first,
4279 +        * before overflow memory.  It must have at least 4096 bytes, but we
4280 +        * want to avoid overflow memory usage if possible.
4281 +        */
4282 +       tile_alloc_size += 1024 * 1024;
4283 +
4284 +       exec->tile_bo = &vc4_bo_create(dev, exec->tile_alloc_offset +
4285 +                                      tile_alloc_size)->base;
4286 +       if (!exec->tile_bo)
4287 +               return -ENOMEM;
4288 +       list_add_tail(&to_vc4_bo(&exec->tile_bo->base)->unref_head,
4289 +                    &exec->unref_list);
4290 +
4291 +       /* tile alloc address. */
4292 +       *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
4293 +                                       exec->tile_alloc_offset);
4294 +       /* tile alloc size. */
4295 +       *(uint32_t *)(validated + 4) = tile_alloc_size;
4296 +       /* tile state address. */
4297 +       *(uint32_t *)(validated + 8) = exec->tile_bo->paddr;
4298 +
4299 +       return 0;
4300 +}
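Pulling the sizing rules above together: the tile state array takes 48 bytes per tile at the start of the BO, the tile allocation area starts at the next 4096-byte boundary, and its initial size is 32 bytes per tile rounded up to 256 bytes plus a 1 MiB cushion before overflow memory is needed. A standalone sketch of that arithmetic, assuming the usual 64x64-pixel tile size so a 1920x1080 frame bins into 30x17 tiles:

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_u32(uint32_t x, uint32_t align)
{
        return (x + align - 1) / align * align;
}

int main(void)
{
        /* e.g. a 1920x1080 frame binned into 64x64 tiles -> 30x17 tiles. */
        uint32_t bin_tiles_x = 30, bin_tiles_y = 17;
        uint32_t tile_count = bin_tiles_x * bin_tiles_y;

        uint32_t tile_state_size   = 48 * tile_count;
        uint32_t tile_alloc_offset = round_up_u32(tile_state_size, 4096);

        uint32_t tile_alloc_size = round_up_u32(32 * tile_count, 256);
        tile_alloc_size += 1024 * 1024;   /* cushion before overflow memory */

        printf("tile state : %u bytes\n", tile_state_size);
        printf("tile alloc : %u bytes at offset %u\n",
               tile_alloc_size, tile_alloc_offset);
        printf("BO size    : %u bytes\n", tile_alloc_offset + tile_alloc_size);
        return 0;
}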
4301 +
4302 +static int
4303 +validate_gem_handles(VALIDATE_ARGS)
4304 +{
4305 +       memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
4306 +       return 0;
4307 +}
4308 +
4309 +#define VC4_DEFINE_PACKET(packet, name, func) \
4310 +       [packet] = { packet ## _SIZE, name, func }
4311 +
4312 +static const struct cmd_info {
4313 +       uint16_t len;
4314 +       const char *name;
4315 +       int (*func)(struct vc4_exec_info *exec, void *validated,
4316 +                   void *untrusted);
4317 +} cmd_info[] = {
4318 +       VC4_DEFINE_PACKET(VC4_PACKET_HALT, "halt", NULL),
4319 +       VC4_DEFINE_PACKET(VC4_PACKET_NOP, "nop", NULL),
4320 +       VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, "flush", NULL),
4321 +       VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, "flush all state", validate_flush_all),
4322 +       VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING, "start tile binning", validate_start_tile_binning),
4323 +       VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE, "increment semaphore", validate_increment_semaphore),
4324 +
4325 +       VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE, "Indexed Primitive List", validate_indexed_prim_list),
4326 +
4327 +       VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE, "Vertex Array Primitives", validate_gl_array_primitive),
4328 +
4329 +       /* This is only used by clipped primitives (packets 48 and 49), which
4330 +        * we don't support parsing yet.
4331 +        */
4332 +       VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, "primitive list format", NULL),
4333 +
4334 +       VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, "GL Shader State", validate_gl_shader_state),
4335 +       VC4_DEFINE_PACKET(VC4_PACKET_NV_SHADER_STATE, "NV Shader State", validate_nv_shader_state),
4336 +
4337 +       VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, "configuration bits", NULL),
4338 +       VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, "flat shade flags", NULL),
4339 +       VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, "point size", NULL),
4340 +       VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, "line width", NULL),
4341 +       VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, "RHT X boundary", NULL),
4342 +       VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, "Depth Offset", NULL),
4343 +       VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, "Clip Window", NULL),
4344 +       VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, "Viewport Offset", NULL),
4345 +       VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, "Clipper XY Scaling", NULL),
4346 +       /* Note: The docs say this was also 105, but it was 106 in the
4347 +        * initial userland code drop.
4348 +        */
4349 +       VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, "Clipper Z Scale and Offset", NULL),
4350 +
4351 +       VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG, "tile binning configuration", validate_tile_binning_config),
4352 +
4353 +       VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, "GEM handles", validate_gem_handles),
4354 +};
4355 +
4356 +int
4357 +vc4_validate_bin_cl(struct drm_device *dev,
4358 +                   void *validated,
4359 +                   void *unvalidated,
4360 +                   struct vc4_exec_info *exec)
4361 +{
4362 +       uint32_t len = exec->args->bin_cl_size;
4363 +       uint32_t dst_offset = 0;
4364 +       uint32_t src_offset = 0;
4365 +
4366 +       while (src_offset < len) {
4367 +               void *dst_pkt = validated + dst_offset;
4368 +               void *src_pkt = unvalidated + src_offset;
4369 +               u8 cmd = *(uint8_t *)src_pkt;
4370 +               const struct cmd_info *info;
4371 +
4372 +               if (cmd >= ARRAY_SIZE(cmd_info)) {
4373 +                       DRM_ERROR("0x%08x: packet %d out of bounds\n",
4374 +                                 src_offset, cmd);
4375 +                       return -EINVAL;
4376 +               }
4377 +
4378 +               info = &cmd_info[cmd];
4379 +               if (!info->name) {
4380 +                       DRM_ERROR("0x%08x: packet %d invalid\n",
4381 +                                 src_offset, cmd);
4382 +                       return -EINVAL;
4383 +               }
4384 +
4385 +#if 0
4386 +               DRM_INFO("0x%08x: packet %d (%s) size %d processing...\n",
4387 +                        src_offset, cmd, info->name, info->len);
4388 +#endif
4389 +
4390 +               if (src_offset + info->len > len) {
4391 +                       DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
4392 +                                 "exceeds bounds (0x%08x)\n",
4393 +                                 src_offset, cmd, info->name, info->len,
4394 +                                 src_offset + len);
4395 +                       return -EINVAL;
4396 +               }
4397 +
4398 +               if (cmd != VC4_PACKET_GEM_HANDLES)
4399 +                       memcpy(dst_pkt, src_pkt, info->len);
4400 +
4401 +               if (info->func && info->func(exec,
4402 +                                            dst_pkt + 1,
4403 +                                            src_pkt + 1)) {
4404 +                       DRM_ERROR("0x%08x: packet %d (%s) failed to "
4405 +                                 "validate\n",
4406 +                                 src_offset, cmd, info->name);
4407 +                       return -EINVAL;
4408 +               }
4409 +
4410 +               src_offset += info->len;
4411 +               /* GEM handle loading doesn't produce HW packets. */
4412 +               if (cmd != VC4_PACKET_GEM_HANDLES)
4413 +                       dst_offset += info->len;
4414 +
4415 +               /* When the CL hits halt, it'll stop reading anything else. */
4416 +               if (cmd == VC4_PACKET_HALT)
4417 +                       break;
4418 +       }
4419 +
4420 +       exec->ct0ea = exec->ct0ca + dst_offset;
4421 +
4422 +       if (!exec->found_start_tile_binning_packet) {
4423 +               DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
4424 +               return -EINVAL;
4425 +       }
4426 +
4427 +       if (!exec->found_increment_semaphore_packet) {
4428 +               DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE\n");
4429 +               return -EINVAL;
4430 +       }
4431 +
4432 +       return 0;
4433 +}
4434 +
4435 +static bool
4436 +reloc_tex(struct vc4_exec_info *exec,
4437 +         void *uniform_data_u,
4438 +         struct vc4_texture_sample_info *sample,
4439 +         uint32_t texture_handle_index)
4440 +
4441 +{
4442 +       struct drm_gem_cma_object *tex;
4443 +       uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
4444 +       uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
4445 +       uint32_t p2 = (sample->p_offset[2] != ~0 ?
4446 +                      *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
4447 +       uint32_t p3 = (sample->p_offset[3] != ~0 ?
4448 +                      *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
4449 +       uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
4450 +       uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
4451 +       uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
4452 +       uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
4453 +       uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
4454 +       uint32_t cpp, tiling_format, utile_w, utile_h;
4455 +       uint32_t i;
4456 +       uint32_t cube_map_stride = 0;
4457 +       enum vc4_texture_data_type type;
4458 +
4459 +       if (!vc4_use_bo(exec, texture_handle_index, VC4_MODE_RENDER, &tex))
4460 +               return false;
4461 +
4462 +       if (sample->is_direct) {
4463 +               uint32_t remaining_size = tex->base.size - p0;
4464 +               if (p0 > tex->base.size - 4) {
4465 +                       DRM_ERROR("UBO offset greater than UBO size\n");
4466 +                       goto fail;
4467 +               }
4468 +               if (p1 > remaining_size - 4) {
4469 +                       DRM_ERROR("UBO clamp would allow reads outside of UBO\n");
4470 +                       goto fail;
4471 +               }
4472 +               *validated_p0 = tex->paddr + p0;
4473 +               return true;
4474 +       }
4475 +
4476 +       if (width == 0)
4477 +               width = 2048;
4478 +       if (height == 0)
4479 +               height = 2048;
4480 +
4481 +       if (p0 & VC4_TEX_P0_CMMODE_MASK) {
4482 +               if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
4483 +                   VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
4484 +                       cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
4485 +               if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
4486 +                   VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
4487 +                       if (cube_map_stride) {
4488 +                               DRM_ERROR("Cube map stride set twice\n");
4489 +                               goto fail;
4490 +                       }
4491 +
4492 +                       cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
4493 +               }
4494 +               if (!cube_map_stride) {
4495 +                       DRM_ERROR("Cube map stride not set\n");
4496 +                       goto fail;
4497 +               }
4498 +       }
4499 +
4500 +       type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
4501 +               (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
4502 +
4503 +       switch (type) {
4504 +       case VC4_TEXTURE_TYPE_RGBA8888:
4505 +       case VC4_TEXTURE_TYPE_RGBX8888:
4506 +       case VC4_TEXTURE_TYPE_RGBA32R:
4507 +               cpp = 4;
4508 +               break;
4509 +       case VC4_TEXTURE_TYPE_RGBA4444:
4510 +       case VC4_TEXTURE_TYPE_RGBA5551:
4511 +       case VC4_TEXTURE_TYPE_RGB565:
4512 +       case VC4_TEXTURE_TYPE_LUMALPHA:
4513 +       case VC4_TEXTURE_TYPE_S16F:
4514 +       case VC4_TEXTURE_TYPE_S16:
4515 +               cpp = 2;
4516 +               break;
4517 +       case VC4_TEXTURE_TYPE_LUMINANCE:
4518 +       case VC4_TEXTURE_TYPE_ALPHA:
4519 +       case VC4_TEXTURE_TYPE_S8:
4520 +               cpp = 1;
4521 +               break;
4522 +       case VC4_TEXTURE_TYPE_ETC1:
4523 +       case VC4_TEXTURE_TYPE_BW1:
4524 +       case VC4_TEXTURE_TYPE_A4:
4525 +       case VC4_TEXTURE_TYPE_A1:
4526 +       case VC4_TEXTURE_TYPE_RGBA64:
4527 +       case VC4_TEXTURE_TYPE_YUV422R:
4528 +       default:
4529 +               DRM_ERROR("Texture format %d unsupported\n", type);
4530 +               goto fail;
4531 +       }
4532 +       utile_w = utile_width(cpp);
4533 +       utile_h = utile_height(cpp);
4534 +
4535 +       if (type == VC4_TEXTURE_TYPE_RGBA32R) {
4536 +               tiling_format = VC4_TILING_FORMAT_LINEAR;
4537 +       } else {
4538 +               if (size_is_lt(width, height, cpp))
4539 +                       tiling_format = VC4_TILING_FORMAT_LT;
4540 +               else
4541 +                       tiling_format = VC4_TILING_FORMAT_T;
4542 +       }
4543 +
4544 +       if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
4545 +                               tiling_format, width, height, cpp)) {
4546 +               goto fail;
4547 +       }
4548 +
4549 +       /* The mipmap levels are stored before the base of the texture.  Make
4550 +        * sure there is actually space in the BO.
4551 +        */
4552 +       for (i = 1; i <= miplevels; i++) {
4553 +               uint32_t level_width = max(width >> i, 1u);
4554 +               uint32_t level_height = max(height >> i, 1u);
4555 +               uint32_t aligned_width, aligned_height;
4556 +               uint32_t level_size;
4557 +
4558 +               /* Once the levels get small enough, they drop from T to LT. */
4559 +               if (tiling_format == VC4_TILING_FORMAT_T &&
4560 +                   size_is_lt(level_width, level_height, cpp)) {
4561 +                       tiling_format = VC4_TILING_FORMAT_LT;
4562 +               }
4563 +
4564 +               switch (tiling_format) {
4565 +               case VC4_TILING_FORMAT_T:
4566 +                       aligned_width = round_up(level_width, utile_w * 8);
4567 +                       aligned_height = round_up(level_height, utile_h * 8);
4568 +                       break;
4569 +               case VC4_TILING_FORMAT_LT:
4570 +                       aligned_width = round_up(level_width, utile_w);
4571 +                       aligned_height = round_up(level_height, utile_h);
4572 +                       break;
4573 +               default:
4574 +                       aligned_width = round_up(level_width, utile_w);
4575 +                       aligned_height = level_height;
4576 +                       break;
4577 +               }
4578 +
4579 +               level_size = aligned_width * cpp * aligned_height;
4580 +
4581 +               if (offset < level_size) {
4582 +                       DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
4583 +                                 "overflowed buffer bounds (offset %d)\n",
4584 +                                 i, level_width, level_height,
4585 +                                 aligned_width, aligned_height,
4586 +                                 level_size, offset);
4587 +                       goto fail;
4588 +               }
4589 +
4590 +               offset -= level_size;
4591 +       }
4592 +
4593 +       *validated_p0 = tex->paddr + p0;
4594 +
4595 +       return true;
4596 + fail:
4597 +       DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
4598 +       DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
4599 +       DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
4600 +       DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
4601 +       return false;
4602 +}
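
As a concrete illustration of the mipmap bounds check at the end of reloc_tex() above, the sketch below walks the same computation for a hypothetical 256x256 RGBA8888 texture (cpp = 4). The utile dimensions (4x4 pixels at 32bpp), the point at which a level is assumed to drop from T to LT tiling, and the helper names are assumptions made for the example; only the shape of the loop mirrors the validator.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t align_up(uint32_t x, uint32_t a) { return (x + a - 1) / a * a; }

    int main(void)
    {
            uint32_t width = 256, height = 256, cpp = 4, miplevels = 8;
            uint32_t utile_w = 4, utile_h = 4;      /* assumed 4x4-pixel utiles at 32bpp */
            uint64_t space_below_base = 0;

            for (uint32_t i = 1; i <= miplevels; i++) {
                    uint32_t w = (width >> i) ? (width >> i) : 1;
                    uint32_t h = (height >> i) ? (height >> i) : 1;
                    /* assume small levels use LT tiling, larger ones T tiling */
                    int lt = (w <= 4 * utile_w || h <= 4 * utile_h);
                    uint32_t aw = align_up(w, lt ? utile_w : utile_w * 8);
                    uint32_t ah = align_up(h, lt ? utile_h : utile_h * 8);
                    uint32_t level_size = aw * cpp * ah;

                    space_below_base += level_size;
                    printf("level %u: %ux%u -> %ux%u, %u bytes\n",
                           i, w, h, aw, ah, level_size);
            }
            /* The texture base offset (p0 & VC4_TEX_P0_OFFSET_MASK) must be at
             * least this large for the validator to accept the mipmap chain.
             */
            printf("minimum base offset: %llu bytes\n",
                   (unsigned long long)space_below_base);
            return 0;
    }
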
4603 +
4604 +static int
4605 +validate_shader_rec(struct drm_device *dev,
4606 +                   struct vc4_exec_info *exec,
4607 +                   struct vc4_shader_state *state)
4608 +{
4609 +       uint32_t *src_handles;
4610 +       void *pkt_u, *pkt_v;
4611 +       enum shader_rec_reloc_type {
4612 +               RELOC_CODE,
4613 +               RELOC_VBO,
4614 +       };
4615 +       struct shader_rec_reloc {
4616 +               enum shader_rec_reloc_type type;
4617 +               uint32_t offset;
4618 +       };
4619 +       static const struct shader_rec_reloc gl_relocs[] = {
4620 +               { RELOC_CODE, 4 },  /* fs */
4621 +               { RELOC_CODE, 16 }, /* vs */
4622 +               { RELOC_CODE, 28 }, /* cs */
4623 +       };
4624 +       static const struct shader_rec_reloc nv_relocs[] = {
4625 +               { RELOC_CODE, 4 }, /* fs */
4626 +               { RELOC_VBO, 12 }
4627 +       };
4628 +       const struct shader_rec_reloc *relocs;
4629 +       struct drm_gem_cma_object *bo[ARRAY_SIZE(gl_relocs) + 8];
4630 +       uint32_t nr_attributes = 0, nr_fixed_relocs, nr_relocs, packet_size;
4631 +       int i;
4632 +       struct vc4_validated_shader_info *validated_shader;
4633 +
4634 +       if (state->packet == VC4_PACKET_NV_SHADER_STATE) {
4635 +               relocs = nv_relocs;
4636 +               nr_fixed_relocs = ARRAY_SIZE(nv_relocs);
4637 +
4638 +               packet_size = 16;
4639 +       } else {
4640 +               relocs = gl_relocs;
4641 +               nr_fixed_relocs = ARRAY_SIZE(gl_relocs);
4642 +
4643 +               nr_attributes = state->addr & 0x7;
4644 +               if (nr_attributes == 0)
4645 +                       nr_attributes = 8;
4646 +               packet_size = gl_shader_rec_size(state->addr);
4647 +       }
4648 +       nr_relocs = nr_fixed_relocs + nr_attributes;
4649 +
4650 +       if (nr_relocs * 4 > exec->shader_rec_size) {
4651 +               DRM_ERROR("overflowed shader recs reading %d handles "
4652 +                         "from %d bytes left\n",
4653 +                         nr_relocs, exec->shader_rec_size);
4654 +               return -EINVAL;
4655 +       }
4656 +       src_handles = exec->shader_rec_u;
4657 +       exec->shader_rec_u += nr_relocs * 4;
4658 +       exec->shader_rec_size -= nr_relocs * 4;
4659 +
4660 +       if (packet_size > exec->shader_rec_size) {
4661 +               DRM_ERROR("overflowed shader recs copying %db packet "
4662 +                         "from %d bytes left\n",
4663 +                         packet_size, exec->shader_rec_size);
4664 +               return -EINVAL;
4665 +       }
4666 +       pkt_u = exec->shader_rec_u;
4667 +       pkt_v = exec->shader_rec_v;
4668 +       memcpy(pkt_v, pkt_u, packet_size);
4669 +       exec->shader_rec_u += packet_size;
4670 +       /* Shader recs have to be aligned to 16 bytes (due to the attribute
4671 +        * flags being in the low bytes), so round the next validated shader
4672 +        * rec address up.  This should be safe, since we've got so many
4673 +        * relocations in a shader rec packet.
4674 +        */
4675 +       BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
4676 +       exec->shader_rec_v += roundup(packet_size, 16);
4677 +       exec->shader_rec_size -= packet_size;
4678 +
4679 +       for (i = 0; i < nr_relocs; i++) {
4680 +               enum vc4_bo_mode mode;
4681 +
4682 +               if (i < nr_fixed_relocs && relocs[i].type == RELOC_CODE)
4683 +                       mode = VC4_MODE_SHADER;
4684 +               else
4685 +                       mode = VC4_MODE_RENDER;
4686 +
4687 +               if (!vc4_use_bo(exec, src_handles[i], mode, &bo[i])) {
4688 +                       return -EINVAL;
4689 +               }
4690 +       }
4691 +
4692 +       for (i = 0; i < nr_fixed_relocs; i++) {
4693 +               uint32_t o = relocs[i].offset;
4694 +               uint32_t src_offset = *(uint32_t *)(pkt_u + o);
4695 +               uint32_t *texture_handles_u;
4696 +               void *uniform_data_u;
4697 +               uint32_t tex;
4698 +
4699 +               *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
4700 +
4701 +               switch (relocs[i].type) {
4702 +               case RELOC_CODE:
4703 +                       if (src_offset != 0) {
4704 +                               DRM_ERROR("Shaders must be at offset 0 of "
4705 +                                         "the BO.\n");
4706 +                               goto fail;
4707 +                       }
4708 +
4709 +                       validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
4710 +                       if (!validated_shader)
4711 +                               goto fail;
4712 +
4713 +                       if (validated_shader->uniforms_src_size >
4714 +                           exec->uniforms_size) {
4715 +                               DRM_ERROR("Uniforms src buffer overflow\n");
4716 +                               goto fail;
4717 +                       }
4718 +
4719 +                       texture_handles_u = exec->uniforms_u;
4720 +                       uniform_data_u = (texture_handles_u +
4721 +                                         validated_shader->num_texture_samples);
4722 +
4723 +                       memcpy(exec->uniforms_v, uniform_data_u,
4724 +                              validated_shader->uniforms_size);
4725 +
4726 +                       for (tex = 0;
4727 +                            tex < validated_shader->num_texture_samples;
4728 +                            tex++) {
4729 +                               if (!reloc_tex(exec,
4730 +                                              uniform_data_u,
4731 +                                              &validated_shader->texture_samples[tex],
4732 +                                              texture_handles_u[tex])) {
4733 +                                       goto fail;
4734 +                               }
4735 +                       }
4736 +
4737 +                       *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
4738 +
4739 +                       exec->uniforms_u += validated_shader->uniforms_src_size;
4740 +                       exec->uniforms_v += validated_shader->uniforms_size;
4741 +                       exec->uniforms_p += validated_shader->uniforms_size;
4742 +
4743 +                       break;
4744 +
4745 +               case RELOC_VBO:
4746 +                       break;
4747 +               }
4748 +       }
4749 +
4750 +       for (i = 0; i < nr_attributes; i++) {
4751 +               struct drm_gem_cma_object *vbo = bo[nr_fixed_relocs + i];
4752 +               uint32_t o = 36 + i * 8;
4753 +               uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
4754 +               uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
4755 +               uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
4756 +               uint32_t max_index;
4757 +
4758 +               if (state->addr & 0x8)
4759 +                       stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
4760 +
4761 +               if (vbo->base.size < offset ||
4762 +                   vbo->base.size - offset < attr_size) {
4763 +                       DRM_ERROR("BO offset overflow (%d + %d > %zd)\n",
4764 +                                 offset, attr_size, vbo->base.size);
4765 +                       return -EINVAL;
4766 +               }
4767 +
4768 +               if (stride != 0) {
4769 +                       max_index = ((vbo->base.size - offset - attr_size) /
4770 +                                    stride);
4771 +                       if (state->max_index > max_index) {
4772 +                               DRM_ERROR("primitives use index %d out of supplied %d\n",
4773 +                                         state->max_index, max_index);
4774 +                               return -EINVAL;
4775 +                       }
4776 +               }
4777 +
4778 +               *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
4779 +       }
4780 +
4781 +       return 0;
4782 +
4783 +fail:
4784 +       return -EINVAL;
4785 +}
4786 +
4787 +int
4788 +vc4_validate_shader_recs(struct drm_device *dev,
4789 +                        struct vc4_exec_info *exec)
4790 +{
4791 +       uint32_t i;
4792 +       int ret = 0;
4793 +
4794 +       for (i = 0; i < exec->shader_state_count; i++) {
4795 +               ret = validate_shader_rec(dev, exec, &exec->shader_state[i]);
4796 +               if (ret)
4797 +                       return ret;
4798 +       }
4799 +
4800 +       return ret;
4801 +}
4802 --- /dev/null
4803 +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
4804 @@ -0,0 +1,521 @@
4805 +/*
4806 + * Copyright © 2014 Broadcom
4807 + *
4808 + * Permission is hereby granted, free of charge, to any person obtaining a
4809 + * copy of this software and associated documentation files (the "Software"),
4810 + * to deal in the Software without restriction, including without limitation
4811 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4812 + * and/or sell copies of the Software, and to permit persons to whom the
4813 + * Software is furnished to do so, subject to the following conditions:
4814 + *
4815 + * The above copyright notice and this permission notice (including the next
4816 + * paragraph) shall be included in all copies or substantial portions of the
4817 + * Software.
4818 + *
4819 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4820 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4821 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
4822 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4823 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4824 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4825 + * IN THE SOFTWARE.
4826 + */
4827 +
4828 +/**
4829 + * DOC: Shader validator for VC4.
4830 + *
4831 + * The VC4 has no IOMMU between it and system memory.  So, a user with access
4832 + * to execute shaders could escalate privilege by overwriting system memory
4833 + * (using the VPM write address register in the general-purpose DMA mode) or
4834 + * reading system memory it shouldn't (reading it as a texture, or uniform
4835 + * data, or vertex data).
4836 + *
4837 + * This walks over a shader starting from some offset within a BO, ensuring
4838 + * that its accesses are appropriately bounded, and recording how many texture
4839 + * accesses are made and where so that we can do relocations for them in the
4840 + * uniform stream.
4841 + *
4842 + * The kernel API has shaders stored in user-mapped BOs.  The BOs will be
4843 + * forcibly unmapped from the process before validation, and any cache of
4844 + * validated state will be flushed if the mapping is faulted back in.
4845 + *
4846 + * Storing the shaders in BOs means that the validation process will be slow
4847 + * due to uncached reads, but since shaders are long-lived and shader BOs are
4848 + * never actually modified, this shouldn't be a problem.
4849 + */
4850 +
4851 +#include "vc4_drv.h"
4852 +#include "vc4_qpu_defines.h"
4853 +
4854 +struct vc4_shader_validation_state {
4855 +       struct vc4_texture_sample_info tmu_setup[2];
4856 +       int tmu_write_count[2];
4857 +
4858 +       /* For registers that were last written to by a MIN instruction with
4859 +        * one argument being a uniform, the address of the uniform.
4860 +        * Otherwise, ~0.
4861 +        *
4862 +        * This is used for the validation of direct address memory reads.
4863 +        */
4864 +       uint32_t live_min_clamp_offsets[32 + 32 + 4];
4865 +       bool live_max_clamp_regs[32 + 32 + 4];
4866 +};
4867 +
4868 +static uint32_t
4869 +waddr_to_live_reg_index(uint32_t waddr, bool is_b)
4870 +{
4871 +       if (waddr < 32) {
4872 +               if (is_b)
4873 +                       return 32 + waddr;
4874 +               else
4875 +                       return waddr;
4876 +       } else if (waddr <= QPU_W_ACC3) {
4877 +
4878 +               return 64 + waddr - QPU_W_ACC0;
4879 +       } else {
4880 +               return ~0;
4881 +       }
4882 +}
4883 +
4884 +static uint32_t
4885 +raddr_add_a_to_live_reg_index(uint64_t inst)
4886 +{
4887 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
4888 +       uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
4889 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
4890 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
4891 +
4892 +       if (add_a == QPU_MUX_A) {
4893 +               return raddr_a;
4894 +       } else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM) {
4895 +               return 32 + raddr_b;
4896 +       } else if (add_a <= QPU_MUX_R3) {
4897 +               return 64 + add_a;
4898 +       } else {
4899 +               return ~0;
4900 +       }
4901 +}
4902 +
4903 +static bool
4904 +is_tmu_submit(uint32_t waddr)
4905 +{
4906 +       return (waddr == QPU_W_TMU0_S ||
4907 +               waddr == QPU_W_TMU1_S);
4908 +}
4909 +
4910 +static bool
4911 +is_tmu_write(uint32_t waddr)
4912 +{
4913 +       return (waddr >= QPU_W_TMU0_S &&
4914 +               waddr <= QPU_W_TMU1_B);
4915 +}
4916 +
4917 +static bool
4918 +record_validated_texture_sample(struct vc4_validated_shader_info *validated_shader,
4919 +                               struct vc4_shader_validation_state *validation_state,
4920 +                               int tmu)
4921 +{
4922 +       uint32_t s = validated_shader->num_texture_samples;
4923 +       int i;
4924 +       struct vc4_texture_sample_info *temp_samples;
4925 +
4926 +       temp_samples = krealloc(validated_shader->texture_samples,
4927 +                               (s + 1) * sizeof(*temp_samples),
4928 +                               GFP_KERNEL);
4929 +       if (!temp_samples)
4930 +               return false;
4931 +
4932 +       memcpy(&temp_samples[s],
4933 +              &validation_state->tmu_setup[tmu],
4934 +              sizeof(*temp_samples));
4935 +
4936 +       validated_shader->num_texture_samples = s + 1;
4937 +       validated_shader->texture_samples = temp_samples;
4938 +
4939 +       for (i = 0; i < 4; i++)
4940 +               validation_state->tmu_setup[tmu].p_offset[i] = ~0;
4941 +
4942 +       return true;
4943 +}
4944 +
4945 +static bool
4946 +check_tmu_write(uint64_t inst,
4947 +               struct vc4_validated_shader_info *validated_shader,
4948 +               struct vc4_shader_validation_state *validation_state,
4949 +               bool is_mul)
4950 +{
4951 +       uint32_t waddr = (is_mul ?
4952 +                         QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
4953 +                         QPU_GET_FIELD(inst, QPU_WADDR_ADD));
4954 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
4955 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
4956 +       int tmu = waddr > QPU_W_TMU0_B;
4957 +       bool submit = is_tmu_submit(waddr);
4958 +       bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
4959 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
4960 +
4961 +       if (is_direct) {
4962 +               uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
4963 +               uint32_t clamp_reg, clamp_offset;
4964 +
4965 +               if (sig == QPU_SIG_SMALL_IMM) {
4966 +                       DRM_ERROR("direct TMU read used small immediate\n");
4967 +                       return false;
4968 +               }
4969 +
4970 +               /* Make sure that this texture load is an add of the base
4971 +                * address of the UBO to a clamped offset within the UBO.
4972 +                */
4973 +               if (is_mul ||
4974 +                   QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
4975 +                       DRM_ERROR("direct TMU load wasn't an add\n");
4976 +                       return false;
4977 +               }
4978 +
4979 +               /* We assert that the clamped address is the first
4980 +                * argument, and the UBO base address is the second argument.
4981 +                * This is arbitrary, but simpler than supporting flipping the
4982 +                * two either way.
4983 +                */
4984 +               clamp_reg = raddr_add_a_to_live_reg_index(inst);
4985 +               if (clamp_reg == ~0) {
4986 +                       DRM_ERROR("direct TMU load wasn't clamped\n");
4987 +                       return false;
4988 +               }
4989 +
4990 +               clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
4991 +               if (clamp_offset == ~0) {
4992 +                       DRM_ERROR("direct TMU load wasn't clamped\n");
4993 +                       return false;
4994 +               }
4995 +
4996 +               /* Store the clamp value's offset in p1 (see reloc_tex() in
4997 +                * vc4_validate.c).
4998 +                */
4999 +               validation_state->tmu_setup[tmu].p_offset[1] =
5000 +                       clamp_offset;
5001 +
5002 +               if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
5003 +                   !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
5004 +                       DRM_ERROR("direct TMU load didn't add to a uniform\n");
5005 +                       return false;
5006 +               }
5007 +
5008 +               validation_state->tmu_setup[tmu].is_direct = true;
5009 +       } else {
5010 +               if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
5011 +                                             raddr_b == QPU_R_UNIF)) {
5012 +                       DRM_ERROR("uniform read in the same instruction as "
5013 +                                 "texture setup.\n");
5014 +                       return false;
5015 +               }
5016 +       }
5017 +
5018 +       if (validation_state->tmu_write_count[tmu] >= 4) {
5019 +               DRM_ERROR("TMU%d got too many parameters before dispatch\n",
5020 +                         tmu);
5021 +               return false;
5022 +       }
5023 +       validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
5024 +               validated_shader->uniforms_size;
5025 +       validation_state->tmu_write_count[tmu]++;
5026 +       /* Since direct uses a RADDR uniform reference, it will get counted in
5027 +        * check_instruction_reads()
5028 +        */
5029 +       if (!is_direct)
5030 +               validated_shader->uniforms_size += 4;
5031 +
5032 +       if (submit) {
5033 +               if (!record_validated_texture_sample(validated_shader,
5034 +                                                    validation_state, tmu)) {
5035 +                       return false;
5036 +               }
5037 +
5038 +               validation_state->tmu_write_count[tmu] = 0;
5039 +       }
5040 +
5041 +       return true;
5042 +}
5043 +
5044 +static bool
5045 +check_register_write(uint64_t inst,
5046 +                    struct vc4_validated_shader_info *validated_shader,
5047 +                    struct vc4_shader_validation_state *validation_state,
5048 +                    bool is_mul)
5049 +{
5050 +       uint32_t waddr = (is_mul ?
5051 +                         QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
5052 +                         QPU_GET_FIELD(inst, QPU_WADDR_ADD));
5053 +
5054 +       switch (waddr) {
5055 +       case QPU_W_UNIFORMS_ADDRESS:
5056 +               /* XXX: We'll probably need to support this for reladdr, but
5057 +                * it's definitely a security-related one.
5058 +                */
5059 +               DRM_ERROR("uniforms address load unsupported\n");
5060 +               return false;
5061 +
5062 +       case QPU_W_TLB_COLOR_MS:
5063 +       case QPU_W_TLB_COLOR_ALL:
5064 +       case QPU_W_TLB_Z:
5065 +               /* These only interact with the tile buffer, not main memory,
5066 +                * so they're safe.
5067 +                */
5068 +               return true;
5069 +
5070 +       case QPU_W_TMU0_S:
5071 +       case QPU_W_TMU0_T:
5072 +       case QPU_W_TMU0_R:
5073 +       case QPU_W_TMU0_B:
5074 +       case QPU_W_TMU1_S:
5075 +       case QPU_W_TMU1_T:
5076 +       case QPU_W_TMU1_R:
5077 +       case QPU_W_TMU1_B:
5078 +               return check_tmu_write(inst, validated_shader, validation_state,
5079 +                                      is_mul);
5080 +
5081 +       case QPU_W_HOST_INT:
5082 +       case QPU_W_TMU_NOSWAP:
5083 +       case QPU_W_TLB_ALPHA_MASK:
5084 +       case QPU_W_MUTEX_RELEASE:
5085 +               /* XXX: I haven't thought about these, so don't support them
5086 +                * for now.
5087 +                */
5088 +               DRM_ERROR("Unsupported waddr %d\n", waddr);
5089 +               return false;
5090 +
5091 +       case QPU_W_VPM_ADDR:
5092 +               DRM_ERROR("General VPM DMA unsupported\n");
5093 +               return false;
5094 +
5095 +       case QPU_W_VPM:
5096 +       case QPU_W_VPMVCD_SETUP:
5097 +               /* We allow VPM setup in general, even including VPM DMA
5098 +                * configuration setup, because the (unsafe) DMA can only be
5099 +                * triggered by QPU_W_VPM_ADDR writes.
5100 +                */
5101 +               return true;
5102 +
5103 +       case QPU_W_TLB_STENCIL_SETUP:
5104 +                return true;
5105 +       }
5106 +
5107 +       return true;
5108 +}
5109 +
5110 +static void
5111 +track_live_clamps(uint64_t inst,
5112 +                 struct vc4_validated_shader_info *validated_shader,
5113 +                 struct vc4_shader_validation_state *validation_state)
5114 +{
5115 +       uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
5116 +       uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
5117 +       uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
5118 +       uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
5119 +       uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
5120 +       uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
5121 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
5122 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
5123 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5124 +       bool ws = inst & QPU_WS;
5125 +       uint32_t lri_add_a, lri_add, lri_mul;
5126 +       bool add_a_is_min_0;
5127 +
5128 +       /* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
5129 +        * before we clear previous live state.
5130 +        */
5131 +       lri_add_a = raddr_add_a_to_live_reg_index(inst);
5132 +       add_a_is_min_0 = (lri_add_a != ~0 &&
5133 +                         validation_state->live_max_clamp_regs[lri_add_a]);
5134 +
5135 +       /* Clear live state for registers written by our instruction. */
5136 +       lri_add = waddr_to_live_reg_index(waddr_add, ws);
5137 +       lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
5138 +       if (lri_mul != ~0) {
5139 +               validation_state->live_max_clamp_regs[lri_mul] = false;
5140 +               validation_state->live_min_clamp_offsets[lri_mul] = ~0;
5141 +       }
5142 +       if (lri_add != ~0) {
5143 +               validation_state->live_max_clamp_regs[lri_add] = false;
5144 +               validation_state->live_min_clamp_offsets[lri_add] = ~0;
5145 +       } else {
5146 +               /* Nothing further to do for live tracking, since only ADDs
5147 +                * generate new live clamp registers.
5148 +                */
5149 +               return;
5150 +       }
5151 +
5152 +       /* Now, handle remaining live clamp tracking for the ADD operation. */
5153 +
5154 +       if (cond_add != QPU_COND_ALWAYS)
5155 +               return;
5156 +
5157 +       if (op_add == QPU_A_MAX) {
5158 +               /* Track live clamps of a value to a minimum of 0 (in either
5159 +                * arg).
5160 +                */
5161 +               if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
5162 +                   (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
5163 +                       return;
5164 +               }
5165 +
5166 +               validation_state->live_max_clamp_regs[lri_add] = true;
5167 +       } else if (op_add == QPU_A_MIN) {
5168 +               /* Track live clamps of a value clamped to a minimum of 0 and
5169 +                * a maximum of some uniform's offset.
5170 +                */
5171 +               if (!add_a_is_min_0)
5172 +                       return;
5173 +
5174 +               if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
5175 +                   !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
5176 +                     sig != QPU_SIG_SMALL_IMM)) {
5177 +                       return;
5178 +               }
5179 +
5180 +               validation_state->live_min_clamp_offsets[lri_add] =
5181 +                       validated_shader->uniforms_size;
5182 +       }
5183 +}
5184 +
5185 +static bool
5186 +check_instruction_writes(uint64_t inst,
5187 +                        struct vc4_validated_shader_info *validated_shader,
5188 +                        struct vc4_shader_validation_state *validation_state)
5189 +{
5190 +       uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
5191 +       uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
5192 +       bool ok;
5193 +
5194 +       if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
5195 +               DRM_ERROR("ADD and MUL both set up textures\n");
5196 +               return false;
5197 +       }
5198 +
5199 +       ok = (check_register_write(inst, validated_shader, validation_state, false) &&
5200 +             check_register_write(inst, validated_shader, validation_state, true));
5201 +
5202 +       track_live_clamps(inst, validated_shader, validation_state);
5203 +
5204 +       return ok;
5205 +}
5206 +
5207 +static bool
5208 +check_instruction_reads(uint64_t inst,
5209 +                       struct vc4_validated_shader_info *validated_shader)
5210 +{
5211 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
5212 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
5213 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5214 +
5215 +       if (raddr_a == QPU_R_UNIF ||
5216 +           (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
5217 +               /* This can't overflow the uint32_t, because we're reading 8
5218 +                * bytes of instruction to increment by 4 here, so we'd
5219 +                * already be OOM.
5220 +                */
5221 +               validated_shader->uniforms_size += 4;
5222 +       }
5223 +
5224 +       return true;
5225 +}
5226 +
5227 +struct vc4_validated_shader_info *
5228 +vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
5229 +{
5230 +       bool found_shader_end = false;
5231 +       int shader_end_ip = 0;
5232 +       uint32_t ip, max_ip;
5233 +       uint64_t *shader;
5234 +       struct vc4_validated_shader_info *validated_shader;
5235 +       struct vc4_shader_validation_state validation_state;
5236 +       int i;
5237 +
5238 +       memset(&validation_state, 0, sizeof(validation_state));
5239 +
5240 +       for (i = 0; i < 8; i++)
5241 +               validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
5242 +       for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
5243 +               validation_state.live_min_clamp_offsets[i] = ~0;
5244 +
5245 +       shader = shader_obj->vaddr;
5246 +       max_ip = shader_obj->base.size / sizeof(uint64_t);
5247 +
5248 +       validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
5249 +       if (!validated_shader)
5250 +               return NULL;
5251 +
5252 +       for (ip = 0; ip < max_ip; ip++) {
5253 +               uint64_t inst = shader[ip];
5254 +               uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5255 +
5256 +               switch (sig) {
5257 +               case QPU_SIG_NONE:
5258 +               case QPU_SIG_WAIT_FOR_SCOREBOARD:
5259 +               case QPU_SIG_SCOREBOARD_UNLOCK:
5260 +               case QPU_SIG_COLOR_LOAD:
5261 +               case QPU_SIG_LOAD_TMU0:
5262 +               case QPU_SIG_LOAD_TMU1:
5263 +               case QPU_SIG_PROG_END:
5264 +               case QPU_SIG_SMALL_IMM:
5265 +                       if (!check_instruction_writes(inst, validated_shader,
5266 +                                                     &validation_state)) {
5267 +                               DRM_ERROR("Bad write at ip %d\n", ip);
5268 +                               goto fail;
5269 +                       }
5270 +
5271 +                       if (!check_instruction_reads(inst, validated_shader))
5272 +                               goto fail;
5273 +
5274 +                       if (sig == QPU_SIG_PROG_END) {
5275 +                               found_shader_end = true;
5276 +                               shader_end_ip = ip;
5277 +                       }
5278 +
5279 +                       break;
5280 +
5281 +               case QPU_SIG_LOAD_IMM:
5282 +                       if (!check_instruction_writes(inst, validated_shader,
5283 +                                                     &validation_state)) {
5284 +                               DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
5285 +                               goto fail;
5286 +                       }
5287 +                       break;
5288 +
5289 +               default:
5290 +                       DRM_ERROR("Unsupported QPU signal %d at "
5291 +                                 "instruction %d\n", sig, ip);
5292 +                       goto fail;
5293 +               }
5294 +
5295 +               /* There are two delay slots after program end is signaled
5296 +                * that are still executed, then we're finished.
5297 +                */
5298 +               if (found_shader_end && ip == shader_end_ip + 2)
5299 +                       break;
5300 +       }
5301 +
5302 +       if (ip == max_ip) {
5303 +               DRM_ERROR("shader failed to terminate before "
5304 +                         "shader BO end at %zd\n",
5305 +                         shader_obj->base.size);
5306 +               goto fail;
5307 +       }
5308 +
5309 +       /* Again, no chance of integer overflow here because the worst case
5310 +        * scenario is 8 bytes of uniforms plus handles per 8-byte
5311 +        * instruction.
5312 +        */
5313 +       validated_shader->uniforms_src_size =
5314 +               (validated_shader->uniforms_size +
5315 +                4 * validated_shader->num_texture_samples);
5316 +
5317 +       return validated_shader;
5318 +
5319 +fail:
5320 +       if (validated_shader) {
5321 +               kfree(validated_shader->texture_samples);
5322 +               kfree(validated_shader);
5323 +       }
5324 +       return NULL;
5325 +}
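
The shader-side pattern that check_tmu_write() and the live-clamp tracking above enforce for "direct" TMU reads is easiest to see in plain C. The sketch below is purely illustrative, neither QPU code nor driver code: the offset handed to the texture unit must come from a max-with-zero followed by a min against a uniform, and that uniform (recorded as p1 and later checked against the BO size in reloc_tex()) is what bounds how far into the UBO the hardware can read.

    #include <stdint.h>

    /* Illustrative only: the C-level shape of a direct TMU read the validator
     * accepts.  ubo_base and ubo_clamp arrive as uniforms; the kernel rewrites
     * ubo_base to the BO's paddr (p0) and verifies the clamp (p1) against the
     * BO's size in reloc_tex().
     */
    uint32_t direct_ubo_read(const uint8_t *ubo_base, uint32_t ubo_clamp,
                             int32_t index)
    {
            uint32_t off = index < 0 ? 0 : (uint32_t)index; /* MAX(index, 0)   */

            off = off < ubo_clamp ? off : ubo_clamp;        /* MIN(off, clamp) */

            /* The sum fed to QPU_W_TMU0_S can never stray outside
             * [ubo_base, ubo_base + ubo_clamp + 3], which the kernel has
             * already checked lies inside the BO.
             */
            return *(const uint32_t *)(ubo_base + off);
    }
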
5326 --- /dev/null
5327 +++ b/include/uapi/drm/vc4_drm.h
5328 @@ -0,0 +1,229 @@
5329 +/*
5330 + * Copyright © 2014-2015 Broadcom
5331 + *
5332 + * Permission is hereby granted, free of charge, to any person obtaining a
5333 + * copy of this software and associated documentation files (the "Software"),
5334 + * to deal in the Software without restriction, including without limitation
5335 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
5336 + * and/or sell copies of the Software, and to permit persons to whom the
5337 + * Software is furnished to do so, subject to the following conditions:
5338 + *
5339 + * The above copyright notice and this permission notice (including the next
5340 + * paragraph) shall be included in all copies or substantial portions of the
5341 + * Software.
5342 + *
5343 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5344 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5345 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
5346 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
5347 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
5348 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
5349 + * IN THE SOFTWARE.
5350 + */
5351 +
5352 +#ifndef _UAPI_VC4_DRM_H_
5353 +#define _UAPI_VC4_DRM_H_
5354 +
5355 +#include <drm/drm.h>
5356 +
5357 +#define DRM_VC4_SUBMIT_CL                         0x00
5358 +#define DRM_VC4_WAIT_SEQNO                        0x01
5359 +#define DRM_VC4_WAIT_BO                           0x02
5360 +#define DRM_VC4_CREATE_BO                         0x03
5361 +#define DRM_VC4_MMAP_BO                           0x04
5362 +#define DRM_VC4_CREATE_SHADER_BO                  0x05
5363 +
4364 +#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
4365 +#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
4366 +#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
4367 +#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
4368 +#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
4369 +#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
5370 +
5371 +struct drm_vc4_submit_rcl_surface {
5372 +       uint32_t hindex; /* Handle index, or ~0 if not present. */
5373 +       uint32_t offset; /* Offset to start of buffer. */
5374 +       /*
5375 +         * Bits for either render config (color_ms_write) or load/store packet.
5376 +        */
5377 +       uint16_t bits;
5378 +       uint16_t pad;
5379 +};
5380 +
5381 +/**
5382 + * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
5383 + * engine.
5384 + *
5385 + * Drivers typically use GPU BOs to store batchbuffers / command lists and
5386 + * their associated state.  However, because the VC4 lacks an MMU, we have to
5387 + * do validation of memory accesses by the GPU commands.  If we were to store
5388 + * our commands in BOs, we'd need to do uncached readback from them to do the
5389 + * validation process, which is too expensive.  Instead, userspace accumulates
5390 + * commands and associated state in plain memory, then the kernel copies the
5391 + * data to its own address space, and then validates and stores it in a GPU
5392 + * BO.
5393 + */
5394 +struct drm_vc4_submit_cl {
5395 +       /* Pointer to the binner command list.
5396 +        *
5397 +        * This is the first set of commands executed, which runs the
5398 +        * coordinate shader to determine where primitives land on the screen,
5399 +        * then writes out the state updates and draw calls necessary per tile
5400 +        * to the tile allocation BO.
5401 +        */
5402 +       uint64_t bin_cl;
5403 +
5404 +       /* Pointer to the shader records.
5405 +        *
5406 +        * Shader records are the structures read by the hardware that contain
5407 +        * pointers to uniforms, shaders, and vertex attributes.  The
5408 +        * reference to the shader record has enough information to determine
5409 +        * how many pointers are necessary (fixed number for shaders/uniforms,
5410 +        * and an attribute count), so those BO indices into bo_handles are
5411 +        * just stored as uint32_ts before each shader record passed in.
5412 +        */
5413 +       uint64_t shader_rec;
5414 +
5415 +       /* Pointer to uniform data and texture handles for the textures
5416 +        * referenced by the shader.
5417 +        *
5418 +        * For each shader state record, there is a set of uniform data in the
5419 +        * order referenced by the record (FS, VS, then CS).  Each set of
5420 +        * uniform data has a uint32_t index into bo_handles per texture
5421 +        * sample operation, in the order the QPU_W_TMUn_S writes appear in
5422 +        * the program.  Following the texture BO handle indices is the actual
5423 +        * uniform data.
5424 +        *
5425 +        * The individual uniform state blocks don't have sizes passed in,
5426 +        * because the kernel has to determine the sizes anyway during shader
5427 +        * code validation.
5428 +        */
5429 +       uint64_t uniforms;
5430 +       uint64_t bo_handles;
5431 +
5432 +       /* Size in bytes of the binner command list. */
5433 +       uint32_t bin_cl_size;
5434 +       /* Size in bytes of the set of shader records. */
5435 +       uint32_t shader_rec_size;
5436 +       /* Number of shader records.
5437 +        *
5438 +        * This could just be computed from the contents of shader_records and
5439 +        * the address bits of references to them from the bin CL, but it
5440 +        * keeps the kernel from having to resize some allocations it makes.
5441 +        */
5442 +       uint32_t shader_rec_count;
5443 +       /* Size in bytes of the uniform state. */
5444 +       uint32_t uniforms_size;
5445 +
5446 +       /* Number of BO handles passed in (size is that times 4). */
5447 +       uint32_t bo_handle_count;
5448 +
5449 +       /* RCL setup: */
5450 +       uint16_t width;
5451 +       uint16_t height;
5452 +       uint8_t min_x_tile;
5453 +       uint8_t min_y_tile;
5454 +       uint8_t max_x_tile;
5455 +       uint8_t max_y_tile;
5456 +       struct drm_vc4_submit_rcl_surface color_read;
5457 +       struct drm_vc4_submit_rcl_surface color_ms_write;
5458 +       struct drm_vc4_submit_rcl_surface zs_read;
5459 +       struct drm_vc4_submit_rcl_surface zs_write;
5460 +       uint32_t clear_color[2];
5461 +       uint32_t clear_z;
5462 +       uint8_t clear_s;
5463 +
5464 +       uint32_t pad:24;
5465 +
5466 +#define VC4_SUBMIT_CL_USE_CLEAR_COLOR                  (1 << 0)
5467 +       uint32_t flags;
5468 +
5469 +       /* Returned value of the seqno of this render job (for the
5470 +        * wait ioctl).
5471 +        */
5472 +       uint64_t seqno;
5473 +};
5474 +
5475 +/**
5476 + * struct drm_vc4_wait_seqno - ioctl argument for waiting for
5477 + * DRM_VC4_SUBMIT_CL completion using its returned seqno.
5478 + *
5479 + * timeout_ns is the timeout in nanoseconds, where "0" means "don't
5480 + * block, just return the status."
5481 + */
5482 +struct drm_vc4_wait_seqno {
5483 +       uint64_t seqno;
5484 +       uint64_t timeout_ns;
5485 +};
5486 +
5487 +/**
5488 + * struct drm_vc4_wait_bo - ioctl argument for waiting for
5489 + * completion of the last DRM_VC4_SUBMIT_CL on a BO.
5490 + *
5491 + * This is useful for cases where multiple processes might be
5492 + * rendering to a BO and you want to wait for all rendering to be
5493 + * completed.
5494 + */
5495 +struct drm_vc4_wait_bo {
5496 +       uint32_t handle;
5497 +       uint32_t pad;
5498 +       uint64_t timeout_ns;
5499 +};
5500 +
5501 +/**
5502 + * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
5503 + *
5504 + * There are currently no values for the flags argument, but it may be
5505 + * used in a future extension.
5506 + */
5507 +struct drm_vc4_create_bo {
5508 +       uint32_t size;
5509 +       uint32_t flags;
5510 +       /** Returned GEM handle for the BO. */
5511 +       uint32_t handle;
5512 +       uint32_t pad;
5513 +};
5514 +
5515 +/**
5516 + * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
5517 + * shader BOs.
5518 + *
5519 + * Since allowing a shader to be overwritten while it's also being
5520 + * executed from would allow privilege escalation, shaders must be
5521 + * created using this ioctl, and they can't be mmapped later.
5522 + */
5523 +struct drm_vc4_create_shader_bo {
5524 +       /* Size of the data argument. */
5525 +       uint32_t size;
5526 +       /* Flags, currently must be 0. */
5527 +       uint32_t flags;
5528 +
5529 +       /* Pointer to the data. */
5530 +       uint64_t data;
5531 +
5532 +       /** Returned GEM handle for the BO. */
5533 +       uint32_t handle;
5534 +       /* Pad, must be 0. */
5535 +       uint32_t pad;
5536 +};
5537 +
5538 +/**
5539 + * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
5540 + *
5541 + * This doesn't actually perform an mmap.  Instead, it returns the
5542 + * offset you need to use in an mmap on the DRM device node.  This
5543 + * means that tools like valgrind end up knowing about the mapped
5544 + * memory.
5545 + *
5546 + * There are currently no values for the flags argument, but it may be
5547 + * used in a future extension.
5548 + */
5549 +struct drm_vc4_mmap_bo {
5550 +       /** Handle for the object being mapped. */
5551 +       uint32_t handle;
5552 +       uint32_t flags;
5553 +       /** offset into the drm node to use for subsequent mmap call. */
5554 +       uint64_t offset;
5555 +};
5556 +
5557 +#endif /* _UAPI_VC4_DRM_H_ */
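
To make the BO ioctls above concrete, here is a minimal userspace sketch (error handling trimmed to perror/return) that creates a BO, asks for its fake mmap offset, and maps it through the DRM node. The device path, and the assumption that that node is driven by vc4 and that the installed uapi header is available as <drm/vc4_drm.h>, are system-specific; production code would typically also retry interrupted ioctls (e.g. via libdrm's drmIoctl()).

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #include <drm/vc4_drm.h>   /* the uapi header added by this patch */

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);  /* assumed to be the vc4 node */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            struct drm_vc4_create_bo create = { .size = 4096, .flags = 0 };
            if (ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create) != 0) {
                    perror("DRM_IOCTL_VC4_CREATE_BO");
                    return 1;
            }

            /* MMAP_BO maps nothing itself; it returns the offset to use in a
             * subsequent mmap() on the DRM node, as documented above.
             */
            struct drm_vc4_mmap_bo map = { .handle = create.handle, .flags = 0 };
            if (ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map) != 0) {
                    perror("DRM_IOCTL_VC4_MMAP_BO");
                    return 1;
            }

            void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                             MAP_SHARED, fd, map.offset);
            if (ptr == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            memset(ptr, 0, create.size);   /* BO contents are now CPU-visible */
            munmap(ptr, create.size);
            close(fd);
            return 0;
    }
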