/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

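/*
 * A gen8+ GGTT PTE carries the page-aligned address in bits 63:12; the low
 * bits are flags.  This mask recovers just the address portion.
 */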
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

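/*
 * Build the backing sg_table for a proxy GEM object by reading the guest
 * framebuffer's GGTT entries: each PTE is decoded into a DMA address and
 * stored as one PAGE_SIZE segment.  No system pages are allocated or pinned
 * here; the object simply aliases memory the guest has already mapped.
 */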
static int vgpu_gem_get_pages(
                struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct sg_table *st;
        struct scatterlist *sg;
        int i, ret;
        gen8_pte_t __iomem *gtt_entries;
        struct intel_vgpu_fb_info *fb_info;
        u32 page_num;

        fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
        if (WARN_ON(!fb_info))
                return -ENODEV;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (unlikely(!st))
                return -ENOMEM;

        page_num = obj->base.size >> PAGE_SHIFT;
        ret = sg_alloc_table(st, page_num, GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ret;
        }
        gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                (fb_info->start >> PAGE_SHIFT);
        for_each_sg(st->sgl, sg, page_num, i) {
                sg->offset = 0;
                sg->length = PAGE_SIZE;
                sg_dma_address(sg) =
                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                sg_dma_len(sg) = PAGE_SIZE;
        }

        __i915_gem_object_set_pages(obj, st, PAGE_SIZE);

        return 0;
}

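/* Release the sg_table built by vgpu_gem_get_pages(); nothing was pinned. */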
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
                struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

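/*
 * kref release callback for an intel_vgpu_dmabuf_obj.  If the owning vGPU is
 * still active, unlink the object from the vGPU's dmabuf list, drop the vfio
 * device reference and remove its idr entry; otherwise the object is an
 * orphan (the vGPU has already been torn down) and only the memory is freed.
 */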
static void dmabuf_gem_object_free(struct kref *kref)
{
        struct intel_vgpu_dmabuf_obj *obj =
                container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
        struct intel_vgpu *vgpu = obj->vgpu;
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
                list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                        dmabuf_obj = container_of(pos,
                                        struct intel_vgpu_dmabuf_obj, list);
                        if (dmabuf_obj == obj) {
                                list_del(pos);
                                intel_gvt_hypervisor_put_vfio_device(vgpu);
                                idr_remove(&vgpu->object_idr,
                                           dmabuf_obj->dmabuf_id);
                                kfree(dmabuf_obj->info);
                                kfree(dmabuf_obj);
                                break;
                        }
                }
        } else {
                /* Free the orphan dmabuf_objs here */
                kfree(obj->info);
                kfree(obj);
        }
}

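/* Reference-count helpers; the final put lands in dmabuf_gem_object_free(). */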
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_put(&obj->kref, dmabuf_gem_object_free);
}

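/*
 * GEM ->release hook for the proxy object: detach the exported dma-buf
 * pointer and drop the reference the GEM object held on the dmabuf_obj.
 * The vGPU's dmabuf_lock is only taken when the vGPU still exists.
 */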
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
        struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
        struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
        struct intel_vgpu *vgpu = obj->vgpu;

        if (vgpu) {
                mutex_lock(&vgpu->dmabuf_lock);
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
                mutex_unlock(&vgpu->dmabuf_lock);
        } else {
                /* vgpu is NULL, as it has been removed already */
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
        }
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
        .flags = I915_GEM_OBJECT_IS_PROXY,
        .get_pages = vgpu_gem_get_pages,
        .put_pages = vgpu_gem_put_pages,
        .release = vgpu_gem_release,
};

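/*
 * Allocate a proxy GEM object sized to the decoded framebuffer and derive
 * the i915 tiling/stride from the plane's DRM format modifier.  On gen9+
 * the modifier maps to NONE/X/Y tiling; on older hardware any non-linear
 * modifier is treated as X-tiled.
 */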
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                struct intel_vgpu_fb_info *info)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc();
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base,
                roundup(info->size, PAGE_SIZE));
        i915_gem_object_init(obj, &intel_vgpu_gem_ops);

        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
        if (INTEL_GEN(dev_priv) >= 9) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;

                switch (info->drm_format_mod) {
                case DRM_FORMAT_MOD_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
                case I915_FORMAT_MOD_X_TILED:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
                case I915_FORMAT_MOD_Y_TILED:
                case I915_FORMAT_MOD_Yf_TILED:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
                        gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
                                     info->drm_format_mod);
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
                obj->tiling_and_stride = info->drm_format_mod ?
                                        I915_TILING_X : 0;
        }

        return obj;
}

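/* The hotspot is only meaningful if it lies within the cursor's bounds. */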
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
        return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

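/*
 * Decode the guest's current primary or cursor plane into an
 * intel_vgpu_fb_info and sanity-check the result: the framebuffer must have
 * a non-zero size, start on a page boundary and fall entirely inside the
 * vGPU's GGTT range.  An out-of-range cursor hotspot is reported as UINT_MAX.
 */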
static int vgpu_get_plane_info(struct drm_device *dev,
                struct intel_vgpu *vgpu,
                struct intel_vgpu_fb_info *info,
                int plane_id)
{
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
        int ret, tile_height = 1;

        memset(info, 0, sizeof(*info));

        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
                if (ret)
                        return ret;
                info->start = p.base;
                info->start_gpa = p.base_gpa;
                info->width = p.width;
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;

                switch (p.tiled) {
                case PLANE_CTL_TILED_LINEAR:
                        info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
                        break;
                case PLANE_CTL_TILED_X:
                        info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
                        tile_height = 8;
                        break;
                case PLANE_CTL_TILED_Y:
                        info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
                        tile_height = 32;
                        break;
                case PLANE_CTL_TILED_YF:
                        info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
                        tile_height = 32;
                        break;
                default:
                        gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                }
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
                        return ret;
                info->start = c.base;
                info->start_gpa = c.base_gpa;
                info->width = c.width;
                info->height = c.height;
                info->stride = c.width * (c.bpp / 8);
                info->drm_format = c.drm_format;
                info->drm_format_mod = 0;
                info->x_pos = c.x_pos;
                info->y_pos = c.y_pos;

                if (validate_hotspot(&c)) {
                        info->x_hot = c.x_hot;
                        info->y_hot = c.y_hot;
                } else {
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }

        info->size = info->stride * roundup(info->height, tile_height);
        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
        }

        if (info->start & (PAGE_SIZE - 1)) {
                gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
                return -EFAULT;
        }

        if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
                gvt_vgpu_err("invalid gma addr\n");
                return -EFAULT;
        }

        return 0;
}

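/*
 * Look for an already exposed dmabuf_obj whose framebuffer description
 * matches the latest decoded plane, so the same dma-buf can be re-used
 * instead of exporting a new one.
 */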
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
                    struct intel_vgpu_fb_info *latest_info)
{
        struct list_head *pos;
        struct intel_vgpu_fb_info *fb_info;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                if ((dmabuf_obj == NULL) ||
                    (dmabuf_obj->info == NULL))
                        continue;

                fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
                if ((fb_info->start == latest_info->start) &&
                    (fb_info->start_gpa == latest_info->start_gpa) &&
                    (fb_info->size == latest_info->size) &&
                    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
                    (fb_info->drm_format == latest_info->drm_format) &&
                    (fb_info->width == latest_info->width) &&
                    (fb_info->height == latest_info->height)) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

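/* Look up an exposed dmabuf_obj by the dmabuf_id handed out at query time. */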
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                if (!dmabuf_obj)
                        continue;

                if (dmabuf_obj->dmabuf_id == id) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

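/* Copy the decoded plane attributes into the structure returned to userspace. */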
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
                      struct intel_vgpu_fb_info *fb_info)
{
        gvt_dmabuf->drm_format = fb_info->drm_format;
        gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
        gvt_dmabuf->width = fb_info->width;
        gvt_dmabuf->height = fb_info->height;
        gvt_dmabuf->stride = fb_info->stride;
        gvt_dmabuf->size = fb_info->size;
        gvt_dmabuf->x_pos = fb_info->x_pos;
        gvt_dmabuf->y_pos = fb_info->y_pos;
        gvt_dmabuf->x_hot = fb_info->x_hot;
        gvt_dmabuf->y_hot = fb_info->y_hot;
}

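/*
 * Back end of the VFIO_DEVICE_QUERY_GFX_PLANE ioctl.  Decode the requested
 * guest plane, re-use a matching dmabuf_obj if one is already exposed,
 * otherwise allocate a new one, register it in the vGPU's object_idr and
 * take an initial reference so it survives until the corresponding
 * get_dmabuf call.  -ENODEV from plane decoding means the plane is not
 * ready yet and is reported as success.
 */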
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct vfio_device_gfx_plane_info *gfx_plane_info = args;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct intel_vgpu_fb_info fb_info;
        int ret = 0;

        if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
                                       VFIO_GFX_PLANE_TYPE_PROBE))
                return ret;
        else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
                        (!gfx_plane_info->flags))
                return -EINVAL;

        ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
                                        gfx_plane_info->drm_plane_type);
        if (ret != 0)
                goto out;

        mutex_lock(&vgpu->dmabuf_lock);
        /* If exists, pick up the exposed dmabuf_obj */
        dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
        if (dmabuf_obj) {
                update_fb_info(gfx_plane_info, &fb_info);
                gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

                /* This buffer may be released between query_plane ioctl and
                 * get_dmabuf ioctl. Add the refcount to make sure it won't
                 * be released between the two ioctls.
                 */
                if (!dmabuf_obj->initref) {
                        dmabuf_obj->initref = true;
                        dmabuf_obj_get(dmabuf_obj);
                }
                ret = 0;
                gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
                            vgpu->id, kref_read(&dmabuf_obj->kref),
                            gfx_plane_info->dmabuf_id);
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out;
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        /* Need to allocate a new one */
        dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
        if (unlikely(!dmabuf_obj)) {
                gvt_vgpu_err("alloc dmabuf_obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
                                   GFP_KERNEL);
        if (unlikely(!dmabuf_obj->info)) {
                gvt_vgpu_err("allocate intel vgpu fb info failed\n");
                ret = -ENOMEM;
                goto out_free_dmabuf;
        }
        memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

        ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

        dmabuf_obj->vgpu = vgpu;

        ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
        if (ret < 0)
                goto out_free_info;
        gfx_plane_info->dmabuf_id = ret;
        dmabuf_obj->dmabuf_id = ret;

        dmabuf_obj->initref = true;

        kref_init(&dmabuf_obj->kref);

        mutex_lock(&vgpu->dmabuf_lock);
        if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
                gvt_vgpu_err("get vfio device failed\n");
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out_free_info;
        }
        mutex_unlock(&vgpu->dmabuf_lock);

        update_fb_info(gfx_plane_info, &fb_info);

        INIT_LIST_HEAD(&dmabuf_obj->list);
        mutex_lock(&vgpu->dmabuf_lock);
        list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
                    __func__, kref_read(&dmabuf_obj->kref), ret);

        return 0;

out_free_info:
        kfree(dmabuf_obj->info);
out_free_dmabuf:
        kfree(dmabuf_obj);
out:
        /* ENODEV means plane isn't ready, which might be a normal case. */
        return (ret == -ENODEV) ? 0 : ret;
}

/*
 * Back end of the VFIO_DEVICE_GET_GFX_DMABUF ioctl: associate an exposed
 * dmabuf_obj with a newly exported dma-buf.  A proxy GEM object is created
 * for the framebuffer described by dmabuf_obj->info, exported as a dma-buf
 * and returned to userspace as a new file descriptor.  The initial reference
 * taken at query time is dropped once the exported object holds its own
 * reference on the dmabuf_obj.
 */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
        int dmabuf_fd;
        int ret = 0;

        mutex_lock(&vgpu->dmabuf_lock);

        dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
        if (dmabuf_obj == NULL) {
                gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
                ret = -EINVAL;
                goto out;
        }

        obj = vgpu_create_gem(dev, dmabuf_obj->info);
        if (obj == NULL) {
                gvt_vgpu_err("create gvt gem obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        obj->gvt_info = dmabuf_obj->info;

        dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
        if (IS_ERR(dmabuf)) {
                gvt_vgpu_err("export dma-buf failed\n");
                ret = PTR_ERR(dmabuf);
                goto out_free_gem;
        }

        ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
        if (ret < 0) {
                gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
                goto out_free_dmabuf;
        }
        dmabuf_fd = ret;

        dmabuf_obj_get(dmabuf_obj);

        if (dmabuf_obj->initref) {
                dmabuf_obj->initref = false;
                dmabuf_obj_put(dmabuf_obj);
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
                    "        file count: %ld, GEM ref: %d\n",
                    vgpu->id, dmabuf_obj->dmabuf_id,
                    kref_read(&dmabuf_obj->kref),
                    dmabuf_fd,
                    file_count(dmabuf->file),
                    kref_read(&obj->base.refcount));

        i915_gem_object_put(obj);

        return dmabuf_fd;

out_free_dmabuf:
        dma_buf_put(dmabuf);
out_free_gem:
        i915_gem_object_put(obj);
out:
        mutex_unlock(&vgpu->dmabuf_lock);
        return ret;
}

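/*
 * Called on vGPU teardown: detach every remaining dmabuf_obj from the vGPU,
 * remove it from the idr, release the vfio device reference and drop the
 * initial reference if it is still held.  Objects still pinned by exported
 * dma-bufs become orphans and are freed by their final dmabuf_obj_put().
 */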
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        mutex_lock(&vgpu->dmabuf_lock);
        list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                dmabuf_obj->vgpu = NULL;

                idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
                intel_gvt_hypervisor_put_vfio_device(vgpu);
                list_del(pos);

                /* dmabuf_obj might be freed in dmabuf_obj_put */
                if (dmabuf_obj->initref) {
                        dmabuf_obj->initref = false;
                        dmabuf_obj_put(dmabuf_obj);
                }
        }
        mutex_unlock(&vgpu->dmabuf_lock);
}