/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <linux/sync_file.h>

#include "virtgpu_drv.h"

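/*
 * Convert a drm_virtgpu_3d_box from the ioctl argument into the
 * little-endian virtio_gpu_box layout that is sent to the host.
 */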
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

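/*
 * VIRTGPU_MAP: hand user space the fake mmap offset of a dumb buffer so
 * it can be mapped through the DRM file descriptor.
 */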
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}

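/*
 * Reserve every buffer on @head under @ticket and validate it against its
 * placement.  On error the reservation is backed off again, so the caller
 * never sees a partially reserved list.
 */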
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
                                    struct list_head *head)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
        int ret;

        ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
        if (ret != 0)
                return ret;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
                ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
                if (ret) {
                        ttm_eu_backoff_reservation(ticket, head);
                        return ret;
                }
        }
        return 0;
}

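/*
 * Drop the GEM reference that was taken on each buffer when it was added
 * to a validation list.
 */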
void virtio_gpu_unref_list(struct list_head *head)
{
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);

                drm_gem_object_put_unlocked(&qobj->gem_base);
        }
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *drm_file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
        struct drm_gem_object *gobj;
        struct virtio_gpu_fence *out_fence;
        struct virtio_gpu_object *qobj;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct list_head validate_list;
        struct ttm_validate_buffer *buflist = NULL;
        int i;
        struct ww_acquire_ctx ticket;
        struct sync_file *sync_file;
        int in_fence_fd = exbuf->fence_fd;
        int out_fence_fd = -1;
        void *buf;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
                return -EINVAL;

        exbuf->fence_fd = -1;

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
                struct dma_fence *in_fence;

                in_fence = sync_file_get_fence(in_fence_fd);

                if (!in_fence)
                        return -EINVAL;

                /*
                 * Wait if the fence is from a foreign context, or if the fence
                 * array contains any fence from a foreign context.
                 */
                ret = 0;
                if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
                        ret = dma_fence_wait(in_fence, true);

                dma_fence_put(in_fence);
                if (ret)
                        return ret;
        }

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0)
                        return out_fence_fd;
        }

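        /*
         * Look up every GEM handle referenced by the submission and build
         * the list of buffers that must be reserved and validated before
         * the command buffer can be queued.
         */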
        INIT_LIST_HEAD(&validate_list);
        if (exbuf->num_bo_handles) {
                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                            sizeof(uint32_t), GFP_KERNEL);
                buflist = kvmalloc_array(exbuf->num_bo_handles,
                                         sizeof(struct ttm_validate_buffer),
                                         GFP_KERNEL | __GFP_ZERO);
                if (!bo_handles || !buflist) {
                        ret = -ENOMEM;
                        goto out_unused_fd;
                }

                user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
                        goto out_unused_fd;
                }

                for (i = 0; i < exbuf->num_bo_handles; i++) {
                        gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
                        if (!gobj) {
                                ret = -ENOENT;
                                goto out_unused_fd;
                        }

                        qobj = gem_to_virtio_gpu_obj(gobj);
                        buflist[i].bo = &qobj->tbo;

                        list_add(&buflist[i].head, &validate_list);
                }
                kvfree(bo_handles);
                bo_handles = NULL;
        }

        ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
        if (ret)
                goto out_free;

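        /* duplicate the command stream from user space into a kernel buffer */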
        buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unresv;
        }

        out_fence = virtio_gpu_fence_alloc(vgdev);
        if (!out_fence) {
                ret = -ENOMEM;
                goto out_memdup;
        }

        if (out_fence_fd >= 0) {
                sync_file = sync_file_create(&out_fence->f);
                if (!sync_file) {
                        dma_fence_put(&out_fence->f);
                        ret = -ENOMEM;
                        goto out_memdup;
                }

                exbuf->fence_fd = out_fence_fd;
                fd_install(out_fence_fd, sync_file->file);
        }

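        /*
         * Queue the commands on the control ring, then attach the out fence
         * to every reserved buffer and drop the reservations.
         */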
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, out_fence);

        ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);

        virtio_gpu_unref_list(&validate_list);
        kvfree(buflist);
        return 0;

out_memdup:
        kfree(buf);
out_unresv:
        ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
        virtio_gpu_unref_list(&validate_list);
out_unused_fd:
        kvfree(bo_handles);
        kvfree(buflist);

        if (out_fence_fd >= 0)
                put_unused_fd(out_fence_fd);

        return ret;
}

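/*
 * VIRTGPU_GETPARAM: report a single device parameter to user space,
 * currently 3D support and the capset query fix.
 */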
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
                return -EFAULT;

        return 0;
}

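/*
 * VIRTGPU_RESOURCE_CREATE: create a host resource together with its
 * backing GEM object and return both the GEM handle and the host
 * resource id to user space.
 */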
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        struct virtio_gpu_fence *fence;
        int ret;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        struct virtio_gpu_object_params params = { 0 };

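        /*
         * Without virgl 3D support only plain 2D resources can be
         * created, so reject anything that needs 3D: extra depth,
         * multisampling, mip levels, array layers, or a target other
         * than 2 (the 2D texture target in the virgl enumeration).
         */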
        if (!vgdev->has_virgl_3d) {
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2)
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        params.format = rc->format;
        params.width = rc->width;
        params.height = rc->height;
        params.size = rc->size;
        if (vgdev->has_virgl_3d) {
                params.virgl = true;
                params.target = rc->target;
                params.bind = rc->bind;
                params.depth = rc->depth;
                params.array_size = rc->array_size;
                params.last_level = rc->last_level;
                params.nr_samples = rc->nr_samples;
                params.flags = rc->flags;
        }
        /* if no size was specified, fall back to a single page */
        if (params.size == 0)
                params.size = PAGE_SIZE;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence)
                return -ENOMEM;
        qobj = virtio_gpu_alloc_object(dev, &params, fence);
        dma_fence_put(&fence->f);
        if (IS_ERR(qobj))
                return PTR_ERR(qobj);
        obj = &qobj->gem_base;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                return ret;
        }
        drm_gem_object_put_unlocked(obj);

        rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
        rc->bo_handle = handle;
        return 0;
}

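/*
 * VIRTGPU_RESOURCE_INFO: report the size and host resource id behind a
 * GEM handle.
 */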
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->gem_base.size;
        ri->res_handle = qobj->hw_res_handle;
        drm_gem_object_put_unlocked(gobj);
        return 0;
}

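/*
 * VIRTGPU_TRANSFER_FROM_HOST: ask the host to copy a box of a 3D
 * resource back into its guest-visible backing storage.  The copy is
 * fenced, and the fence is attached to the object's reservation.
 */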
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;
        struct virtio_gpu_box box;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence) {
                ret = -ENOMEM;
                goto out_unres;
        }
        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, qobj->hw_res_handle,
                 vfpriv->ctx_id, offset, args->level,
                 &box, fence);
        reservation_object_add_excl_fence(qobj->tbo.resv,
                                          &fence->f);

        dma_fence_put(&fence->f);
out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_put_unlocked(gobj);
        return ret;
}

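/*
 * VIRTGPU_TRANSFER_TO_HOST: push a box of a resource from guest backing
 * storage to the host.  With virgl 3D the transfer is fenced; in 2D mode
 * the plain unfenced 2D transfer command is used.
 */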
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_box box;
        int ret;
        u32 offset = args->offset;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, qobj, offset,
                         box.w, box.h, box.x, box.y, NULL);
        } else {
                fence = virtio_gpu_fence_alloc(vgdev);
                if (!fence) {
                        ret = -ENOMEM;
                        goto out_unres;
                }
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev, qobj,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
                         args->level, &box, fence);
                reservation_object_add_excl_fence(qobj->tbo.resv,
                                                  &fence->f);
                dma_fence_put(&fence->f);
        }

out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_put_unlocked(gobj);
        return ret;
}

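/*
 * VIRTGPU_WAIT: wait until the object is idle, or just report whether it
 * is busy when VIRTGPU_WAIT_NOWAIT is set.
 */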
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        int ret;
        bool nowait = false;

        gobj = drm_gem_object_lookup(file, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        if (args->flags & VIRTGPU_WAIT_NOWAIT)
                nowait = true;
        ret = virtio_gpu_object_wait(qobj, nowait);

        drm_gem_object_put_unlocked(gobj);
        return ret;
}

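/*
 * VIRTGPU_GET_CAPS: copy a capability set to user space, fetching it
 * from the host on a cache miss and answering from the driver-side
 * capset cache afterwards.
 */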
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        unsigned int size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;

        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        /* don't allow userspace to pass 0 */
        if (args->size == 0)
                return -EINVAL;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        host_caps_size = vgdev->capsets[found_valid].max_size;
        /* only copy to user the minimum of the host or guest caps size */
        size = min(args->size, host_caps_size);

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);

copy_exit:
        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        if (!ret)
                return -EBUSY;

        /* the is_valid check must happen before the cache entry is copied */
        smp_rmb();

        ptr = cache_ent->caps_cache;

        if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
                return -EFAULT;

        return 0;
}

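/*
 * All entries require DRM_AUTH and are exposed to render nodes via
 * DRM_RENDER_ALLOW.
 */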
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),

        /* make transfers async to the main ring? - not sure, can we
         * thread these in the underlying GL
         */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),
};