Linux-libre 5.4.48-gnu: drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <glisse@freedesktop.org>
26  */
27
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31
32 #include <drm/amdgpu_drm.h>
33 #include <drm/drm_syncobj.h>
34 #include "amdgpu.h"
35 #include "amdgpu_trace.h"
36 #include "amdgpu_gmc.h"
37 #include "amdgpu_gem.h"
38
39 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
40                                       struct drm_amdgpu_cs_chunk_fence *data,
41                                       uint32_t *offset)
42 {
43         struct drm_gem_object *gobj;
44         struct amdgpu_bo *bo;
45         unsigned long size;
46         int r;
47
48         gobj = drm_gem_object_lookup(p->filp, data->handle);
49         if (gobj == NULL)
50                 return -EINVAL;
51
52         bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
53         p->uf_entry.priority = 0;
54         p->uf_entry.tv.bo = &bo->tbo;
55         /* One for TTM and one for the CS job */
56         p->uf_entry.tv.num_shared = 2;
57
58         drm_gem_object_put_unlocked(gobj);
59
60         size = amdgpu_bo_size(bo);
61         if (size != PAGE_SIZE || (data->offset + 8) > size) {
62                 r = -EINVAL;
63                 goto error_unref;
64         }
65
66         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
67                 r = -EINVAL;
68                 goto error_unref;
69         }
70
71         *offset = data->offset;
72
73         return 0;
74
75 error_unref:
76         amdgpu_bo_unref(&bo);
77         return r;
78 }
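
/*
 * Illustration (not part of the driver): the user fence chunk as userspace
 * would build it. A minimal sketch, assuming a previously created one-page
 * GEM BO whose handle sits in the hypothetical variable bo_handle:
 *
 *	struct drm_amdgpu_cs_chunk_fence fence_data = {
 *		.handle = bo_handle,	// GEM handle of a PAGE_SIZE BO
 *		.offset = 0,		// offset + 8 must fit inside the BO
 *	};
 *
 * The kernel writes the 64-bit fence sequence number at this offset when
 * the submission completes, which is why the checks above reject BOs that
 * are not exactly PAGE_SIZE, offsets closer than 8 bytes to the end, and
 * userptr-backed BOs.
 */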
79
80 static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
81                                       struct drm_amdgpu_bo_list_in *data)
82 {
83         int r;
84         struct drm_amdgpu_bo_list_entry *info = NULL;
85
86         r = amdgpu_bo_create_list_entry_array(data, &info);
87         if (r)
88                 return r;
89
90         r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
91                                   &p->bo_list);
92         if (r)
93                 goto error_free;
94
95         kvfree(info);
96         return 0;
97
98 error_free:
99         kvfree(info);
101
102         return r;
103 }
104
105 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
106 {
107         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
108         struct amdgpu_vm *vm = &fpriv->vm;
109         uint64_t *chunk_array_user;
110         uint64_t *chunk_array;
111         unsigned size, num_ibs = 0;
112         uint32_t uf_offset = 0;
113         int i;
114         int ret;
115
116         if (cs->in.num_chunks == 0)
117                 return 0;
118
119         chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
120         if (!chunk_array)
121                 return -ENOMEM;
122
123         p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
124         if (!p->ctx) {
125                 ret = -EINVAL;
126                 goto free_chunk;
127         }
128
129         mutex_lock(&p->ctx->lock);
130
131         /* skip guilty context job */
132         if (atomic_read(&p->ctx->guilty) == 1) {
133                 ret = -ECANCELED;
134                 goto free_chunk;
135         }
136
137         /* get chunks */
138         chunk_array_user = u64_to_user_ptr(cs->in.chunks);
139         if (copy_from_user(chunk_array, chunk_array_user,
140                            sizeof(uint64_t)*cs->in.num_chunks)) {
141                 ret = -EFAULT;
142                 goto free_chunk;
143         }
144
145         p->nchunks = cs->in.num_chunks;
146         p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
147                             GFP_KERNEL);
148         if (!p->chunks) {
149                 ret = -ENOMEM;
150                 goto free_chunk;
151         }
152
153         for (i = 0; i < p->nchunks; i++) {
154                 struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
155                 struct drm_amdgpu_cs_chunk user_chunk;
156                 uint32_t __user *cdata;
157
158                 chunk_ptr = u64_to_user_ptr(chunk_array[i]);
159                 if (copy_from_user(&user_chunk, chunk_ptr,
160                                        sizeof(struct drm_amdgpu_cs_chunk))) {
161                         ret = -EFAULT;
162                         i--;
163                         goto free_partial_kdata;
164                 }
165                 p->chunks[i].chunk_id = user_chunk.chunk_id;
166                 p->chunks[i].length_dw = user_chunk.length_dw;
167
168                 size = p->chunks[i].length_dw;
169                 cdata = u64_to_user_ptr(user_chunk.chunk_data);
170
171                 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
172                 if (p->chunks[i].kdata == NULL) {
173                         ret = -ENOMEM;
174                         i--;
175                         goto free_partial_kdata;
176                 }
177                 size *= sizeof(uint32_t);
178                 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
179                         ret = -EFAULT;
180                         goto free_partial_kdata;
181                 }
182
183                 switch (p->chunks[i].chunk_id) {
184                 case AMDGPU_CHUNK_ID_IB:
185                         ++num_ibs;
186                         break;
187
188                 case AMDGPU_CHUNK_ID_FENCE:
189                         size = sizeof(struct drm_amdgpu_cs_chunk_fence);
190                         if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
191                                 ret = -EINVAL;
192                                 goto free_partial_kdata;
193                         }
194
195                         ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
196                                                          &uf_offset);
197                         if (ret)
198                                 goto free_partial_kdata;
199
200                         break;
201
202                 case AMDGPU_CHUNK_ID_BO_HANDLES:
203                         size = sizeof(struct drm_amdgpu_bo_list_in);
204                         if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
205                                 ret = -EINVAL;
206                                 goto free_partial_kdata;
207                         }
208
209                         ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
210                         if (ret)
211                                 goto free_partial_kdata;
212
213                         break;
214
215                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
216                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
217                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
218                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
219                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
220                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
221                         break;
222
223                 default:
224                         ret = -EINVAL;
225                         goto free_partial_kdata;
226                 }
227         }
228
229         ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
230         if (ret)
231                 goto free_all_kdata;
232
233         if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
234                 ret = -ECANCELED;
235                 goto free_all_kdata;
236         }
237
238         if (p->uf_entry.tv.bo)
239                 p->job->uf_addr = uf_offset;
240         kfree(chunk_array);
241
242         /* Use this opportunity to fill in task info for the vm */
243         amdgpu_vm_set_task_info(vm);
244
245         return 0;
246
247 free_all_kdata:
248         i = p->nchunks - 1;
249 free_partial_kdata:
250         for (; i >= 0; i--)
251                 kvfree(p->chunks[i].kdata);
252         kfree(p->chunks);
253         p->chunks = NULL;
254         p->nchunks = 0;
255 free_chunk:
256         kfree(chunk_array);
257
258         return ret;
259 }
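
/*
 * Illustration (not part of the driver): the double indirection the loop
 * above walks. A hedged userspace-side sketch with a single IB chunk;
 * ib_info is a hypothetical struct drm_amdgpu_cs_chunk_ib and cs is the
 * union drm_amdgpu_cs handed to DRM_IOCTL_AMDGPU_CS:
 *
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id   = AMDGPU_CHUNK_ID_IB,
 *		.length_dw  = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4,
 *		.chunk_data = (uintptr_t)&ib_info,
 *	};
 *	__u64 chunk_array[] = { (uintptr_t)&chunk };
 *
 *	cs.in.num_chunks = 1;
 *	cs.in.chunks = (uintptr_t)chunk_array;
 *
 * cs.in.chunks points to an array of user pointers, each naming one
 * struct drm_amdgpu_cs_chunk, whose chunk_data in turn points to the
 * id-specific payload that is copied into kdata above.
 */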
260
261 /* Convert microseconds to bytes. */
262 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
263 {
264         if (us <= 0 || !adev->mm_stats.log2_max_MBps)
265                 return 0;
266
267         /* Since accum_us is incremented by a million per second, just
268          * multiply it by the number of MB/s to get the number of bytes.
269          */
270         return us << adev->mm_stats.log2_max_MBps;
271 }
272
273 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
274 {
275         if (!adev->mm_stats.log2_max_MBps)
276                 return 0;
277
278         return bytes >> adev->mm_stats.log2_max_MBps;
279 }
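
/*
 * Worked example of the conversion pair above: accum_us grows by one
 * million per second of wall time. Assuming log2_max_MBps = 6 (a 64 MB/s
 * migration budget), one accumulated second yields
 *
 *	1000000 << 6 = 64000000 bytes, i.e. 64 MB,
 *
 * so shifting by log2_max_MBps turns microseconds of budget into bytes,
 * and the reverse shift turns moved bytes back into spent microseconds.
 */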
280
281 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
282  * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
283  * which means it can go over the threshold once. If that happens, the driver
284  * will be in debt and no other buffer migrations can be done until that debt
285  * is repaid.
286  *
287  * This approach allows moving a buffer of any size (it's important to allow
288  * that).
289  *
290  * The currency is simply time in microseconds and it increases as the clock
291  * ticks. The accumulated microseconds (us) are converted to bytes and
292  * returned.
293  */
294 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
295                                               u64 *max_bytes,
296                                               u64 *max_vis_bytes)
297 {
298         s64 time_us, increment_us;
299         u64 free_vram, total_vram, used_vram;
300
301         /* Allow a maximum of 200 accumulated ms. This is basically per-IB
302          * throttling.
303          *
304          * It means that in order to get full max MBps, at least 5 IBs per
305          * second must be submitted and not more than 200ms apart from each
306          * other.
307          */
308         const s64 us_upper_bound = 200000;
309
310         if (!adev->mm_stats.log2_max_MBps) {
311                 *max_bytes = 0;
312                 *max_vis_bytes = 0;
313                 return;
314         }
315
316         total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
317         used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
318         free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
319
320         spin_lock(&adev->mm_stats.lock);
321
322         /* Increase the amount of accumulated us. */
323         time_us = ktime_to_us(ktime_get());
324         increment_us = time_us - adev->mm_stats.last_update_us;
325         adev->mm_stats.last_update_us = time_us;
326         adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
327                                       us_upper_bound);
328
329         /* This prevents the short period of low performance when the VRAM
330          * usage is low and the driver is in debt or doesn't have enough
331          * accumulated us to fill VRAM quickly.
332          *
333          * The situation can occur in these cases:
334          * - a lot of VRAM is freed by userspace
335          * - the presence of a big buffer causes a lot of evictions
336          *   (solution: split buffers into smaller ones)
337          *
338          * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
339          * accum_us to a positive number.
340          */
341         if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
342                 s64 min_us;
343
344                 /* Be more aggressive on dGPUs. Try to fill a portion of free
345                  * VRAM now.
346                  */
347                 if (!(adev->flags & AMD_IS_APU))
348                         min_us = bytes_to_us(adev, free_vram / 4);
349                 else
350                         min_us = 0; /* Reset accum_us on APUs. */
351
352                 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
353         }
354
355         /* This is set to 0 if the driver is in debt to disallow (optional)
356          * buffer moves.
357          */
358         *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
359
360         /* Do the same for visible VRAM if half of it is free */
361         if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
362                 u64 total_vis_vram = adev->gmc.visible_vram_size;
363                 u64 used_vis_vram =
364                         amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
365
366                 if (used_vis_vram < total_vis_vram) {
367                         u64 free_vis_vram = total_vis_vram - used_vis_vram;
368                         adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
369                                                           increment_us, us_upper_bound);
370
371                         if (free_vis_vram >= total_vis_vram / 2)
372                                 adev->mm_stats.accum_us_vis =
373                                         max(bytes_to_us(adev, free_vis_vram / 2),
374                                             adev->mm_stats.accum_us_vis);
375                 }
376
377                 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
378         } else {
379                 *max_vis_bytes = 0;
380         }
381
382         spin_unlock(&adev->mm_stats.lock);
383 }
384
385 /* Report how many bytes have really been moved for the last command
386  * submission. This can result in a debt that can stop buffer migrations
387  * temporarily.
388  */
389 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
390                                   u64 num_vis_bytes)
391 {
392         spin_lock(&adev->mm_stats.lock);
393         adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
394         adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
395         spin_unlock(&adev->mm_stats.lock);
396 }
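
/*
 * Example of the debt model: if the threshold allowed 16 MB of moves but
 * validation legitimately moved a single 64 MB buffer (a non-zero
 * threshold may be exceeded once), accum_us goes negative here and
 * amdgpu_cs_get_threshold_for_moves() reports a zero threshold until the
 * clock has paid the difference back.
 */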
397
398 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
399                                  struct amdgpu_bo *bo)
400 {
401         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
402         struct ttm_operation_ctx ctx = {
403                 .interruptible = true,
404                 .no_wait_gpu = false,
405                 .resv = bo->tbo.base.resv,
406                 .flags = 0
407         };
408         uint32_t domain;
409         int r;
410
411         if (bo->pin_count)
412                 return 0;
413
414         /* Don't move this buffer if we have depleted our allowance
415          * to move it. Don't move anything if the threshold is zero.
416          */
417         if (p->bytes_moved < p->bytes_moved_threshold) {
418                 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
419                     (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
420                         /* And don't move a CPU_ACCESS_REQUIRED BO to limited
421                          * visible VRAM if we've depleted our allowance to do
422                          * that.
423                          */
424                         if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
425                                 domain = bo->preferred_domains;
426                         else
427                                 domain = bo->allowed_domains;
428                 } else {
429                         domain = bo->preferred_domains;
430                 }
431         } else {
432                 domain = bo->allowed_domains;
433         }
434
435 retry:
436         amdgpu_bo_placement_from_domain(bo, domain);
437         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
438
439         p->bytes_moved += ctx.bytes_moved;
440         if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
441             amdgpu_bo_in_cpu_visible_vram(bo))
442                 p->bytes_moved_vis += ctx.bytes_moved;
443
444         if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
445                 domain = bo->allowed_domains;
446                 goto retry;
447         }
448
449         return r;
450 }
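
/*
 * Summary of the placement decision above:
 *
 *	move budget left, and either all of VRAM is CPU-visible, or the BO
 *	does not require CPU access, or visible-VRAM budget is also left
 *		-> preferred domains
 *	move budget left, but the visible-VRAM budget is exhausted for a
 *	CPU_ACCESS_REQUIRED BO in limited visible VRAM
 *		-> allowed domains
 *	move budget exhausted
 *		-> allowed domains
 *
 * plus one retry with the allowed domains if the preferred placement
 * fails with -ENOMEM.
 */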
451
452 /* Last resort, try to evict something from the current working set */
453 static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
454                                 struct amdgpu_bo *validated)
455 {
456         uint32_t domain = validated->allowed_domains;
457         struct ttm_operation_ctx ctx = { true, false };
458         int r;
459
460         if (!p->evictable)
461                 return false;
462
463         for (; &p->evictable->tv.head != &p->validated;
464              p->evictable = list_prev_entry(p->evictable, tv.head)) {
465
466                 struct amdgpu_bo_list_entry *candidate = p->evictable;
467                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
468                 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
469                 bool update_bytes_moved_vis;
470                 uint32_t other;
471
472                 /* If we reached our current BO we can forget it */
473                 if (bo == validated)
474                         break;
475
476                 /* We can't move pinned BOs here */
477                 if (bo->pin_count)
478                         continue;
479
480                 other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
481
482                 /* Check if this BO is in one of the domains we need space for */
483                 if (!(other & domain))
484                         continue;
485
486                 /* Check if we can move this BO somewhere else */
487                 other = bo->allowed_domains & ~domain;
488                 if (!other)
489                         continue;
490
491                 /* Good, we can try to move this BO somewhere else */
492                 update_bytes_moved_vis =
493                                 !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
494                                 amdgpu_bo_in_cpu_visible_vram(bo);
495                 amdgpu_bo_placement_from_domain(bo, other);
496                 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
497                 p->bytes_moved += ctx.bytes_moved;
498                 if (update_bytes_moved_vis)
499                         p->bytes_moved_vis += ctx.bytes_moved;
500
501                 if (unlikely(r))
502                         break;
503
504                 p->evictable = list_prev_entry(p->evictable, tv.head);
505                 list_move(&candidate->tv.head, &p->validated);
506
507                 return true;
508         }
509
510         return false;
511 }
512
513 static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
514 {
515         struct amdgpu_cs_parser *p = param;
516         int r;
517
518         do {
519                 r = amdgpu_cs_bo_validate(p, bo);
520         } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
521         if (r)
522                 return r;
523
524         if (bo->shadow)
525                 r = amdgpu_cs_bo_validate(p, bo->shadow);
526
527         return r;
528 }
529
530 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
531                             struct list_head *validated)
532 {
533         struct ttm_operation_ctx ctx = { true, false };
534         struct amdgpu_bo_list_entry *lobj;
535         int r;
536
537         list_for_each_entry(lobj, validated, tv.head) {
538                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
539                 struct mm_struct *usermm;
540
541                 usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
542                 if (usermm && usermm != current->mm)
543                         return -EPERM;
544
545                 if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
546                     lobj->user_invalidated && lobj->user_pages) {
547                         amdgpu_bo_placement_from_domain(bo,
548                                                         AMDGPU_GEM_DOMAIN_CPU);
549                         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
550                         if (r)
551                                 return r;
552
553                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
554                                                      lobj->user_pages);
555                 }
556
557                 if (p->evictable == lobj)
558                         p->evictable = NULL;
559
560                 r = amdgpu_cs_validate(p, bo);
561                 if (r)
562                         return r;
563
564                 kvfree(lobj->user_pages);
565                 lobj->user_pages = NULL;
566         }
567         return 0;
568 }
569
570 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
571                                 union drm_amdgpu_cs *cs)
572 {
573         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
574         struct amdgpu_vm *vm = &fpriv->vm;
575         struct amdgpu_bo_list_entry *e;
576         struct list_head duplicates;
577         struct amdgpu_bo *gds;
578         struct amdgpu_bo *gws;
579         struct amdgpu_bo *oa;
580         int r;
581
582         INIT_LIST_HEAD(&p->validated);
583
584         /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
585         if (cs->in.bo_list_handle) {
586                 if (p->bo_list)
587                         return -EINVAL;
588
589                 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
590                                        &p->bo_list);
591                 if (r)
592                         return r;
593         } else if (!p->bo_list) {
594                 /* Create an empty bo_list when no handle is provided */
595                 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
596                                           &p->bo_list);
597                 if (r)
598                         return r;
599         }
600
601         /* One for TTM and one for the CS job */
602         amdgpu_bo_list_for_each_entry(e, p->bo_list)
603                 e->tv.num_shared = 2;
604
605         amdgpu_bo_list_get_list(p->bo_list, &p->validated);
606         if (p->bo_list->first_userptr != p->bo_list->num_entries)
607                 p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
608
609         INIT_LIST_HEAD(&duplicates);
610         amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
611
612         if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
613                 list_add(&p->uf_entry.tv.head, &p->validated);
614
615         /* Get userptr backing pages. If the pages were updated after being
616          * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
617          * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages.
618          */
619         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
620                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
621                 bool userpage_invalidated = false;
622                 int i;
623
624                 e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
625                                         sizeof(struct page *),
626                                         GFP_KERNEL | __GFP_ZERO);
627                 if (!e->user_pages) {
628                         DRM_ERROR("calloc failure\n");
629                         return -ENOMEM;
630                 }
631
632                 r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
633                 if (r) {
634                         kvfree(e->user_pages);
635                         e->user_pages = NULL;
636                         return r;
637                 }
638
639                 for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
640                         if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
641                                 userpage_invalidated = true;
642                                 break;
643                         }
644                 }
645                 e->user_invalidated = userpage_invalidated;
646         }
647
648         r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
649                                    &duplicates, false);
650         if (unlikely(r != 0)) {
651                 if (r != -ERESTARTSYS)
652                         DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
653                 goto out;
654         }
655
656         amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
657                                           &p->bytes_moved_vis_threshold);
658         p->bytes_moved = 0;
659         p->bytes_moved_vis = 0;
660         p->evictable = list_last_entry(&p->validated,
661                                        struct amdgpu_bo_list_entry,
662                                        tv.head);
663
664         r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
665                                       amdgpu_cs_validate, p);
666         if (r) {
667                 DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
668                 goto error_validate;
669         }
670
671         r = amdgpu_cs_list_validate(p, &duplicates);
672         if (r)
673                 goto error_validate;
674
675         r = amdgpu_cs_list_validate(p, &p->validated);
676         if (r)
677                 goto error_validate;
678
679         amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
680                                      p->bytes_moved_vis);
681
682         gds = p->bo_list->gds_obj;
683         gws = p->bo_list->gws_obj;
684         oa = p->bo_list->oa_obj;
685
686         amdgpu_bo_list_for_each_entry(e, p->bo_list) {
687                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
688
689                 /* Make sure we use the exclusive slot for shared BOs */
690                 if (bo->prime_shared_count)
691                         e->tv.num_shared = 0;
692                 e->bo_va = amdgpu_vm_bo_find(vm, bo);
693         }
694
695         if (gds) {
696                 p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
697                 p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
698         }
699         if (gws) {
700                 p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
701                 p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
702         }
703         if (oa) {
704                 p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
705                 p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
706         }
707
708         if (!r && p->uf_entry.tv.bo) {
709                 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
710
711                 r = amdgpu_ttm_alloc_gart(&uf->tbo);
712                 p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
713         }
714
715 error_validate:
716         if (r)
717                 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
718 out:
719         return r;
720 }
721
722 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
723 {
724         struct amdgpu_bo_list_entry *e;
725         int r;
726
727         list_for_each_entry(e, &p->validated, tv.head) {
728                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
729                 struct dma_resv *resv = bo->tbo.base.resv;
730
731                 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
732                                      amdgpu_bo_explicit_sync(bo));
733
734                 if (r)
735                         return r;
736         }
737         return 0;
738 }
739
740 /**
741  * amdgpu_cs_parser_fini() - clean parser states
742  * @parser:     parser structure holding parsing context.
743  * @error:      error number
744  * @backoff:    indicator to back off the buffer reservations
745  * If error is set, unvalidate the buffers; otherwise just free the
746  * memory used by the parsing context.
747  **/
748 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
749                                   bool backoff)
750 {
751         unsigned i;
752
753         if (error && backoff)
754                 ttm_eu_backoff_reservation(&parser->ticket,
755                                            &parser->validated);
756
757         for (i = 0; i < parser->num_post_deps; i++) {
758                 drm_syncobj_put(parser->post_deps[i].syncobj);
759                 kfree(parser->post_deps[i].chain);
760         }
761         kfree(parser->post_deps);
762
763         dma_fence_put(parser->fence);
764
765         if (parser->ctx) {
766                 mutex_unlock(&parser->ctx->lock);
767                 amdgpu_ctx_put(parser->ctx);
768         }
769         if (parser->bo_list)
770                 amdgpu_bo_list_put(parser->bo_list);
771
772         for (i = 0; i < parser->nchunks; i++)
773                 kvfree(parser->chunks[i].kdata);
774         kfree(parser->chunks);
775         if (parser->job)
776                 amdgpu_job_free(parser->job);
777         if (parser->uf_entry.tv.bo) {
778                 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
779
780                 amdgpu_bo_unref(&uf);
781         }
782 }
783
784 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
785 {
786         struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
787         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
788         struct amdgpu_device *adev = p->adev;
789         struct amdgpu_vm *vm = &fpriv->vm;
790         struct amdgpu_bo_list_entry *e;
791         struct amdgpu_bo_va *bo_va;
792         struct amdgpu_bo *bo;
793         int r;
794
795         /* Only for UVD/VCE VM emulation */
796         if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
797                 unsigned i, j;
798
799                 for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
800                         struct drm_amdgpu_cs_chunk_ib *chunk_ib;
801                         struct amdgpu_bo_va_mapping *m;
802                         struct amdgpu_bo *aobj = NULL;
803                         struct amdgpu_cs_chunk *chunk;
804                         uint64_t offset, va_start;
805                         struct amdgpu_ib *ib;
806                         uint8_t *kptr;
807
808                         chunk = &p->chunks[i];
809                         ib = &p->job->ibs[j];
810                         chunk_ib = chunk->kdata;
811
812                         if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
813                                 continue;
814
815                         va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
816                         r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
817                         if (r) {
818                                 DRM_ERROR("IB va_start is invalid\n");
819                                 return r;
820                         }
821
822                         if ((va_start + chunk_ib->ib_bytes) >
823                             (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
824                                 DRM_ERROR("IB va_start+ib_bytes is invalid\n");
825                                 return -EINVAL;
826                         }
827
828                         /* the IB should be reserved at this point */
829                         r = amdgpu_bo_kmap(aobj, (void **)&kptr);
830                         if (r)
831                                 return r;
833
834                         offset = m->start * AMDGPU_GPU_PAGE_SIZE;
835                         kptr += va_start - offset;
836
837                         if (ring->funcs->parse_cs) {
838                                 memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
839                                 amdgpu_bo_kunmap(aobj);
840
841                                 r = amdgpu_ring_parse_cs(ring, p, j);
842                                 if (r)
843                                         return r;
844                         } else {
845                                 ib->ptr = (uint32_t *)kptr;
846                                 r = amdgpu_ring_patch_cs_in_place(ring, p, j);
847                                 amdgpu_bo_kunmap(aobj);
848                                 if (r)
849                                         return r;
850                         }
851
852                         j++;
853                 }
854         }
855
856         if (!p->job->vm)
857                 return amdgpu_cs_sync_rings(p);
858
860         r = amdgpu_vm_clear_freed(adev, vm, NULL);
861         if (r)
862                 return r;
863
864         r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
865         if (r)
866                 return r;
867
868         r = amdgpu_sync_fence(adev, &p->job->sync,
869                               fpriv->prt_va->last_pt_update, false);
870         if (r)
871                 return r;
872
873         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
874                 struct dma_fence *f;
875
876                 bo_va = fpriv->csa_va;
877                 BUG_ON(!bo_va);
878                 r = amdgpu_vm_bo_update(adev, bo_va, false);
879                 if (r)
880                         return r;
881
882                 f = bo_va->last_pt_update;
883                 r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
884                 if (r)
885                         return r;
886         }
887
888         amdgpu_bo_list_for_each_entry(e, p->bo_list) {
889                 struct dma_fence *f;
890
891                 /* ignore duplicates */
892                 bo = ttm_to_amdgpu_bo(e->tv.bo);
893                 if (!bo)
894                         continue;
895
896                 bo_va = e->bo_va;
897                 if (bo_va == NULL)
898                         continue;
899
900                 r = amdgpu_vm_bo_update(adev, bo_va, false);
901                 if (r)
902                         return r;
903
904                 f = bo_va->last_pt_update;
905                 r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
906                 if (r)
907                         return r;
908         }
909
910         r = amdgpu_vm_handle_moved(adev, vm);
911         if (r)
912                 return r;
913
914         r = amdgpu_vm_update_directories(adev, vm);
915         if (r)
916                 return r;
917
918         r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
919         if (r)
920                 return r;
921
922         p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
923
924         if (amdgpu_vm_debug) {
925                 /* Invalidate all BOs to test for userspace bugs */
926                 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
927                         struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
928
929                         /* ignore duplicates */
930                         if (!bo)
931                                 continue;
932
933                         amdgpu_vm_bo_invalidate(adev, bo, false);
934                 }
935         }
936
937         return amdgpu_cs_sync_rings(p);
938 }
939
940 static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
941                              struct amdgpu_cs_parser *parser)
942 {
943         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
944         struct amdgpu_vm *vm = &fpriv->vm;
945         int r, ce_preempt = 0, de_preempt = 0;
946         struct amdgpu_ring *ring;
947         int i, j;
948
949         for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
950                 struct amdgpu_cs_chunk *chunk;
951                 struct amdgpu_ib *ib;
952                 struct drm_amdgpu_cs_chunk_ib *chunk_ib;
953                 struct drm_sched_entity *entity;
954
955                 chunk = &parser->chunks[i];
956                 ib = &parser->job->ibs[j];
957                 chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
958
959                 if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
960                         continue;
961
962                 if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
963                     (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
964                         if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
965                                 if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
966                                         ce_preempt++;
967                                 else
968                                         de_preempt++;
969                         }
970
971                         /* each GFX command submission allows at most one preemptible IB each for CE and DE */
972                         if (ce_preempt > 1 || de_preempt > 1)
973                                 return -EINVAL;
974                 }
975
976                 r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
977                                           chunk_ib->ip_instance, chunk_ib->ring,
978                                           &entity);
979                 if (r)
980                         return r;
981
982                 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
983                         parser->job->preamble_status |=
984                                 AMDGPU_PREAMBLE_IB_PRESENT;
985
986                 if (parser->entity && parser->entity != entity)
987                         return -EINVAL;
988
989                 parser->entity = entity;
990
991                 ring = to_amdgpu_ring(entity->rq->sched);
992                 r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
993                                    chunk_ib->ib_bytes : 0, ib);
994                 if (r) {
995                         DRM_ERROR("Failed to get ib !\n");
996                         return r;
997                 }
998
999                 ib->gpu_addr = chunk_ib->va_start;
1000                 ib->length_dw = chunk_ib->ib_bytes / 4;
1001                 ib->flags = chunk_ib->flags;
1002
1003                 j++;
1004         }
1005
1006         /* MM engine doesn't support user fences */
1007         ring = to_amdgpu_ring(parser->entity->rq->sched);
1008         if (parser->job->uf_addr && ring->funcs->no_user_fence)
1009                 return -EINVAL;
1010
1011         return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
1012 }
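
/*
 * Illustration (not part of the driver): the IB chunk payload consumed
 * above. A hedged userspace-side sketch; ib_gpu_va and num_dw are
 * hypothetical:
 *
 *	struct drm_amdgpu_cs_chunk_ib ib_info = {
 *		.ip_type     = AMDGPU_HW_IP_GFX,
 *		.ip_instance = 0,
 *		.ring        = 0,
 *		.va_start    = ib_gpu_va,	// GPU VA of the mapped IB
 *		.ib_bytes    = num_dw * 4,	// dword-aligned size in bytes
 *		.flags       = 0,
 *	};
 *
 * ip_type/ip_instance/ring select the scheduler entity through
 * amdgpu_ctx_get_entity(), while va_start and ib_bytes become
 * ib->gpu_addr and ib->length_dw above.
 */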
1013
1014 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
1015                                        struct amdgpu_cs_chunk *chunk)
1016 {
1017         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1018         unsigned num_deps;
1019         int i, r;
1020         struct drm_amdgpu_cs_chunk_dep *deps;
1021
1022         deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
1023         num_deps = chunk->length_dw * 4 /
1024                 sizeof(struct drm_amdgpu_cs_chunk_dep);
1025
1026         for (i = 0; i < num_deps; ++i) {
1027                 struct amdgpu_ctx *ctx;
1028                 struct drm_sched_entity *entity;
1029                 struct dma_fence *fence;
1030
1031                 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
1032                 if (ctx == NULL)
1033                         return -EINVAL;
1034
1035                 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
1036                                           deps[i].ip_instance,
1037                                           deps[i].ring, &entity);
1038                 if (r) {
1039                         amdgpu_ctx_put(ctx);
1040                         return r;
1041                 }
1042
1043                 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
1044                 amdgpu_ctx_put(ctx);
1045
1046                 if (IS_ERR(fence))
1047                         return PTR_ERR(fence);
1048                 else if (!fence)
1049                         continue;
1050
1051                 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
1052                         struct drm_sched_fence *s_fence;
1053                         struct dma_fence *old = fence;
1054
1055                         s_fence = to_drm_sched_fence(fence);
1056                         fence = dma_fence_get(&s_fence->scheduled);
1057                         dma_fence_put(old);
1058                 }
1059
1060                 r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
1061                 dma_fence_put(fence);
1062                 if (r)
1063                         return r;
1064         }
1065         return 0;
1066 }
1067
1068 static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
1069                                                  uint32_t handle, u64 point,
1070                                                  u64 flags)
1071 {
1072         struct dma_fence *fence;
1073         int r;
1074
1075         r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
1076         if (r) {
1077                 DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
1078                           handle, point, r);
1079                 return r;
1080         }
1081
1082         r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
1083         dma_fence_put(fence);
1084
1085         return r;
1086 }
1087
1088 static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
1089                                             struct amdgpu_cs_chunk *chunk)
1090 {
1091         struct drm_amdgpu_cs_chunk_sem *deps;
1092         unsigned num_deps;
1093         int i, r;
1094
1095         deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1096         num_deps = chunk->length_dw * 4 /
1097                 sizeof(struct drm_amdgpu_cs_chunk_sem);
1098         for (i = 0; i < num_deps; ++i) {
1099                 r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
1100                                                           0, 0);
1101                 if (r)
1102                         return r;
1103         }
1104
1105         return 0;
1106 }
1107
1109 static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
1110                                                      struct amdgpu_cs_chunk *chunk)
1111 {
1112         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1113         unsigned num_deps;
1114         int i, r;
1115
1116         syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1117         num_deps = chunk->length_dw * 4 /
1118                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1119         for (i = 0; i < num_deps; ++i) {
1120                 r = amdgpu_syncobj_lookup_and_add_to_sync(p,
1121                                                           syncobj_deps[i].handle,
1122                                                           syncobj_deps[i].point,
1123                                                           syncobj_deps[i].flags);
1124                 if (r)
1125                         return r;
1126         }
1127
1128         return 0;
1129 }
1130
1131 static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1132                                              struct amdgpu_cs_chunk *chunk)
1133 {
1134         struct drm_amdgpu_cs_chunk_sem *deps;
1135         unsigned num_deps;
1136         int i;
1137
1138         deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1139         num_deps = chunk->length_dw * 4 /
1140                 sizeof(struct drm_amdgpu_cs_chunk_sem);
1141
1142         if (p->post_deps)
1143                 return -EINVAL;
1144
1145         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1146                                      GFP_KERNEL);
1147         p->num_post_deps = 0;
1148
1149         if (!p->post_deps)
1150                 return -ENOMEM;
1151
1153         for (i = 0; i < num_deps; ++i) {
1154                 p->post_deps[i].syncobj =
1155                         drm_syncobj_find(p->filp, deps[i].handle);
1156                 if (!p->post_deps[i].syncobj)
1157                         return -EINVAL;
1158                 p->post_deps[i].chain = NULL;
1159                 p->post_deps[i].point = 0;
1160                 p->num_post_deps++;
1161         }
1162
1163         return 0;
1164 }
1165
1167 static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1168                                                       struct amdgpu_cs_chunk *chunk)
1169 {
1170         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1171         unsigned num_deps;
1172         int i;
1173
1174         syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1175         num_deps = chunk->length_dw * 4 /
1176                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1177
1178         if (p->post_deps)
1179                 return -EINVAL;
1180
1181         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1182                                      GFP_KERNEL);
1183         p->num_post_deps = 0;
1184
1185         if (!p->post_deps)
1186                 return -ENOMEM;
1187
1188         for (i = 0; i < num_deps; ++i) {
1189                 struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
1190
1191                 dep->chain = NULL;
1192                 if (syncobj_deps[i].point) {
1193                         dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
1194                         if (!dep->chain)
1195                                 return -ENOMEM;
1196                 }
1197
1198                 dep->syncobj = drm_syncobj_find(p->filp,
1199                                                 syncobj_deps[i].handle);
1200                 if (!dep->syncobj) {
1201                         kfree(dep->chain);
1202                         return -EINVAL;
1203                 }
1204                 dep->point = syncobj_deps[i].point;
1205                 p->num_post_deps++;
1206         }
1207
1208         return 0;
1209 }
1210
1211 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
1212                                   struct amdgpu_cs_parser *p)
1213 {
1214         int i, r;
1215
1216         for (i = 0; i < p->nchunks; ++i) {
1217                 struct amdgpu_cs_chunk *chunk;
1218
1219                 chunk = &p->chunks[i];
1220
1221                 switch (chunk->chunk_id) {
1222                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
1223                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
1224                         r = amdgpu_cs_process_fence_dep(p, chunk);
1225                         if (r)
1226                                 return r;
1227                         break;
1228                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
1229                         r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
1230                         if (r)
1231                                 return r;
1232                         break;
1233                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
1234                         r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
1235                         if (r)
1236                                 return r;
1237                         break;
1238                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
1239                         r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
1240                         if (r)
1241                                 return r;
1242                         break;
1243                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
1244                         r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
1245                         if (r)
1246                                 return r;
1247                         break;
1248                 }
1249         }
1250
1251         return 0;
1252 }
1253
1254 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1255 {
1256         int i;
1257
1258         for (i = 0; i < p->num_post_deps; ++i) {
1259                 if (p->post_deps[i].chain && p->post_deps[i].point) {
1260                         drm_syncobj_add_point(p->post_deps[i].syncobj,
1261                                               p->post_deps[i].chain,
1262                                               p->fence, p->post_deps[i].point);
1263                         p->post_deps[i].chain = NULL;
1264                 } else {
1265                         drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1266                                                   p->fence);
1267                 }
1268         }
1269 }
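
/*
 * Note on the two signalling paths above: a binary syncobj simply has its
 * fence replaced, while a timeline syncobj gets the submission fence
 * attached at the chunk-supplied point through the dma_fence_chain node
 * preallocated in amdgpu_cs_process_syncobj_timeline_out_dep().
 */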
1270
1271 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1272                             union drm_amdgpu_cs *cs)
1273 {
1274         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1275         struct drm_sched_entity *entity = p->entity;
1276         enum drm_sched_priority priority;
1277         struct amdgpu_ring *ring;
1278         struct amdgpu_bo_list_entry *e;
1279         struct amdgpu_job *job;
1280         uint64_t seq;
1281         int r;
1282
1283         job = p->job;
1284         p->job = NULL;
1285
1286         r = drm_sched_job_init(&job->base, entity, p->filp);
1287         if (r)
1288                 goto error_unlock;
1289
1290         /* No memory allocation is allowed while holding the mn lock.
1291          * p->mn is held until amdgpu_cs_submit() is finished and the fence
1292          * is added to the BOs.
1293          */
1294         amdgpu_mn_lock(p->mn);
1295
1296         /* If the userptrs were invalidated after amdgpu_cs_parser_bos(),
1297          * return -EAGAIN; drmIoctl() in libdrm will restart amdgpu_cs_ioctl().
1298          */
1299         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1300                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1301
1302                 r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1303         }
1304         if (r) {
1305                 r = -EAGAIN;
1306                 goto error_abort;
1307         }
1308
1309         job->owner = p->filp;
1310         p->fence = dma_fence_get(&job->base.s_fence->finished);
1311
1312         amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
1313         amdgpu_cs_post_dependencies(p);
1314
1315         if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1316             !p->ctx->preamble_presented) {
1317                 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1318                 p->ctx->preamble_presented = true;
1319         }
1320
1321         cs->out.handle = seq;
1322         job->uf_sequence = seq;
1323
1324         amdgpu_job_free_resources(job);
1325
1326         trace_amdgpu_cs_ioctl(job);
1327         amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1328         priority = job->base.s_priority;
1329         drm_sched_entity_push_job(&job->base, entity);
1330
1331         ring = to_amdgpu_ring(entity->rq->sched);
1332         amdgpu_ring_priority_get(ring, priority);
1333
1334         amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1335
1336         ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1337         amdgpu_mn_unlock(p->mn);
1338
1339         return 0;
1340
1341 error_abort:
1342         drm_sched_job_cleanup(&job->base);
1343         amdgpu_mn_unlock(p->mn);
1344
1345 error_unlock:
1346         amdgpu_job_free(job);
1347         return r;
1348 }
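
/*
 * The -EAGAIN path above relies on the retry loop in libdrm's ioctl
 * wrapper. A minimal sketch of that behaviour (userspace, not driver
 * code):
 *
 *	do {
 *		ret = ioctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
 *	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
 *
 * so a submission whose userptr pages were invalidated in flight is
 * transparently re-parsed with fresh pages.
 */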
1349
1350 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1351 {
1352         struct amdgpu_device *adev = dev->dev_private;
1353         union drm_amdgpu_cs *cs = data;
1354         struct amdgpu_cs_parser parser = {};
1355         bool reserved_buffers = false;
1356         int i, r;
1357
1358         if (!adev->accel_working)
1359                 return -EBUSY;
1360
1361         parser.adev = adev;
1362         parser.filp = filp;
1363
1364         r = amdgpu_cs_parser_init(&parser, data);
1365         if (r) {
1366                 DRM_ERROR("Failed to initialize parser %d!\n", r);
1367                 goto out;
1368         }
1369
1370         r = amdgpu_cs_ib_fill(adev, &parser);
1371         if (r)
1372                 goto out;
1373
1374         r = amdgpu_cs_dependencies(adev, &parser);
1375         if (r) {
1376                 DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1377                 goto out;
1378         }
1379
1380         r = amdgpu_cs_parser_bos(&parser, data);
1381         if (r) {
1382                 if (r == -ENOMEM)
1383                         DRM_ERROR("Not enough memory for command submission!\n");
1384                 else if (r != -ERESTARTSYS && r != -EAGAIN)
1385                         DRM_ERROR("Failed to process the buffer list %d!\n", r);
1386                 goto out;
1387         }
1388
1389         reserved_buffers = true;
1390
1391         for (i = 0; i < parser.job->num_ibs; i++)
1392                 trace_amdgpu_cs(&parser, i);
1393
1394         r = amdgpu_cs_vm_handling(&parser);
1395         if (r)
1396                 goto out;
1397
1398         r = amdgpu_cs_submit(&parser, cs);
1399
1400 out:
1401         amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1402
1403         return r;
1404 }
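
/*
 * Pipeline summary for the ioctl above: parser_init copies the chunks in,
 * ib_fill picks the scheduler entity and allocates the IBs, dependencies
 * resolves the wait fences, parser_bos reserves and validates every BO,
 * vm_handling updates the page tables, and cs_submit pushes the job to
 * the scheduler; parser_fini unwinds whatever was set up, backing off the
 * reservations on error.
 */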
1405
1406 /**
1407  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1408  *
1409  * @dev: drm device
1410  * @data: data from userspace
1411  * @filp: file private
1412  *
1413  * Wait for the command submission identified by handle to finish.
1414  */
1415 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1416                          struct drm_file *filp)
1417 {
1418         union drm_amdgpu_wait_cs *wait = data;
1419         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1420         struct drm_sched_entity *entity;
1421         struct amdgpu_ctx *ctx;
1422         struct dma_fence *fence;
1423         long r;
1424
1425         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1426         if (ctx == NULL)
1427                 return -EINVAL;
1428
1429         r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1430                                   wait->in.ring, &entity);
1431         if (r) {
1432                 amdgpu_ctx_put(ctx);
1433                 return r;
1434         }
1435
1436         fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1437         if (IS_ERR(fence))
1438                 r = PTR_ERR(fence);
1439         else if (fence) {
1440                 r = dma_fence_wait_timeout(fence, true, timeout);
1441                 if (r > 0 && fence->error)
1442                         r = fence->error;
1443                 dma_fence_put(fence);
1444         } else
1445                 r = 1;
1446
1447         amdgpu_ctx_put(ctx);
1448         if (r < 0)
1449                 return r;
1450
1451         memset(wait, 0, sizeof(*wait));
1452         wait->out.status = (r == 0);
1453
1454         return 0;
1455 }
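
/*
 * Illustration (not part of the driver): waiting on the handle returned
 * in cs.out.handle. A hedged sketch; fd and ctx_id are hypothetical:
 *
 *	union drm_amdgpu_wait_cs wait;
 *
 *	memset(&wait, 0, sizeof(wait));
 *	wait.in.handle = cs.out.handle;
 *	wait.in.ctx_id = ctx_id;
 *	wait.in.ip_type = AMDGPU_HW_IP_GFX;
 *	wait.in.timeout = AMDGPU_TIMEOUT_INFINITE;
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait);
 *
 * On success wait.out.status is 0 if the fence signalled and 1 if the
 * timeout expired first, matching the (r == 0) above.
 */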
1456
1457 /**
1458  * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1459  *
1460  * @adev: amdgpu device
1461  * @filp: file private
1462  * @user: drm_amdgpu_fence copied from user space
1463  */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
{
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        int r;

        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);

        r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
                                  user->ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return ERR_PTR(r);
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
        amdgpu_ctx_put(ctx);

        return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_fence_to_handle *info = data;
        struct dma_fence *fence;
        struct drm_syncobj *syncobj;
        struct sync_file *sync_file;
        int fd, r;

        fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        if (!fence)
                fence = dma_fence_get_stub();

        switch (info->in.what) {
        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
                fd = get_unused_fd_flags(O_CLOEXEC);
                if (fd < 0) {
                        dma_fence_put(fence);
                        return fd;
                }

                sync_file = sync_file_create(fence);
                dma_fence_put(fence);
                if (!sync_file) {
                        put_unused_fd(fd);
                        return -ENOMEM;
                }

                fd_install(fd, sync_file->file);
                info->out.handle = fd;
                return 0;

        default:
                return -EINVAL;
        }
}

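/*
 * Illustrative only: exporting the fence behind a (ctx_id, seq_no) pair as
 * a sync_file fd that other processes or drivers can wait on; ctx_id and
 * seq_no are assumed to come from an earlier submission:
 *
 *      union drm_amdgpu_fence_to_handle fth = {};
 *
 *      fth.in.fence.ctx_id = ctx_id;
 *      fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *      fth.in.fence.seq_no = seq_no;
 *      fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *      if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth) == 0)
 *              sync_file_fd = fth.out.handle;
 */
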
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     union drm_amdgpu_wait_fences *wait,
                                     struct drm_amdgpu_fence *fences)
{
        uint32_t fence_count = wait->in.fence_count;
        unsigned int i;
        long r = 1;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;
                /* timeout_ns is an absolute deadline, so recomputing the
                 * jiffies timeout on each iteration leaves only the
                 * remaining time for the next fence.
                 */
                unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);
                else if (!fence)
                        continue;

                r = dma_fence_wait_timeout(fence, true, timeout);
                /* Pick up the fence error while we still hold a reference;
                 * dereferencing the fence after dma_fence_put() would be a
                 * potential use-after-free.
                 */
                if (r > 0 && fence->error)
                        r = fence->error;

                dma_fence_put(fence);
                if (r < 0)
                        return r;

                if (r == 0)
                        break;
        }

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);

        return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
                                    struct drm_file *filp,
                                    union drm_amdgpu_wait_fences *wait,
                                    struct drm_amdgpu_fence *fences)
{
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
        uint32_t fence_count = wait->in.fence_count;
        uint32_t first = ~0;
        struct dma_fence **array;
        unsigned int i;
        long r;

        /* Prepare the fence array */
        array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

        if (array == NULL)
                return -ENOMEM;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence)) {
                        r = PTR_ERR(fence);
                        goto err_free_fence_array;
                } else if (fence) {
                        array[i] = fence;
                } else { /* NULL, the fence has been already signaled */
                        r = 1;
                        first = i;
                        goto out;
                }
        }

        r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
                                       &first);
        if (r < 0)
                goto err_free_fence_array;

out:
        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);
        wait->out.first_signaled = first;

        if (first < fence_count && array[first])
                r = array[first]->error;
        else
                r = 0;

err_free_fence_array:
        for (i = 0; i < fence_count; i++)
                dma_fence_put(array[i]);
        kfree(array);

        return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_wait_fences *wait = data;
        uint32_t fence_count = wait->in.fence_count;
        struct drm_amdgpu_fence *fences_user;
        struct drm_amdgpu_fence *fences;
        int r;

        /* Get the fences from userspace */
        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
                        GFP_KERNEL);
        if (fences == NULL)
                return -ENOMEM;

        fences_user = u64_to_user_ptr(wait->in.fences);
        if (copy_from_user(fences, fences_user,
                sizeof(struct drm_amdgpu_fence) * fence_count)) {
                r = -EFAULT;
                goto err_free_fences;
        }

        if (wait->in.wait_all)
                r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
        else
                r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
        kfree(fences);

        return r;
}

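/*
 * Illustrative only: waiting for the first of two submissions to signal.
 * With wait_all = 0 the ioctl returns as soon as any fence signals and
 * reports its index in first_signaled; with wait_all = 1 it waits for all
 * of them. seq_a/seq_b are assumed fence handles from earlier submissions:
 *
 *      struct drm_amdgpu_fence fences[2] = {
 *              { .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_GFX, .seq_no = seq_a },
 *              { .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_GFX, .seq_no = seq_b },
 *      };
 *      union drm_amdgpu_wait_fences wait = {};
 *
 *      wait.in.fences = (uintptr_t)fences;
 *      wait.in.fence_count = 2;
 *      wait.in.wait_all = 0;
 *      wait.in.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
 *      if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wait) == 0 &&
 *          wait.out.status)
 *              first = wait.out.first_signaled;
 */
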
/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when a mapping
 * is found, a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **map)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va_mapping *mapping;
        int r;

        addr /= AMDGPU_GPU_PAGE_SIZE;

        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
        if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
                return -EINVAL;

        *bo = mapping->bo_va->base.bo;
        *map = mapping;

        /* Double check that the BO is reserved by this CS */
        if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
                return -EINVAL;

        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
                if (r)
                        return r;
        }

        return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
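
/*
 * Illustrative only: IB parsers (e.g. for UVD/VCE) use the helper above to
 * translate a GPU VA found in a command stream back to the BO that backs
 * it. Mapping offsets are in GPU pages, hence the scaling when computing
 * the byte offset of addr within the mapping:
 *
 *      struct amdgpu_bo_va_mapping *map;
 *      struct amdgpu_bo *bo;
 *      uint64_t offset;
 *      int r;
 *
 *      r = amdgpu_cs_find_mapping(parser, addr, &bo, &map);
 *      if (r)
 *              return r;
 *      offset = addr - map->start * AMDGPU_GPU_PAGE_SIZE;
 */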