/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);
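/*
 * Readers of kfd_processes_table use SRCU so that lookups never block
 * process teardown; only insertion and removal take kfd_processes_mutex.
 * A sketch of the read-side pattern used throughout this file:
 *
 *      int idx = srcu_read_lock(&kfd_processes_srcu);
 *      p = find_process_by_mm(mm);
 *      srcu_read_unlock(&kfd_processes_srcu, idx);
 *
 * Removal pairs hash_del_rcu() with synchronize_srcu() (see
 * kfd_process_notifier_release()) before the structure can go away.
 */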

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
                                        struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

struct kfd_procfs_tree {
        struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buffer)
{
        int val = 0;

        if (strcmp(attr->name, "pasid") == 0) {
                struct kfd_process *p = container_of(attr, struct kfd_process,
                                                     attr_pasid);
                val = p->pasid;
        } else {
                pr_err("Invalid attribute\n");
                return -EINVAL;
        }

        return snprintf(buffer, PAGE_SIZE, "%d\n", val);
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
        kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
        .show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
        .release = kfd_procfs_kobj_release,
        .sysfs_ops = &kfd_procfs_ops,
};

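/*
 * kfd_procfs_init - Create the "proc" sysfs directory under the kfd
 * device kobject. Per-process subdirectories, each exposing a "pasid"
 * file served by kfd_procfs_show() above, are added later from
 * kfd_create_process(). Failure here is non-fatal: the driver keeps
 * working without the procfs tree.
 */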
void kfd_procfs_init(void)
{
        int ret = 0;

        procfs.kobj = kfd_alloc_struct(procfs.kobj);
        if (!procfs.kobj)
                return;

        ret = kobject_init_and_add(procfs.kobj, &procfs_type,
                                   &kfd_device->kobj, "proc");
        if (ret) {
                pr_warn("Could not create procfs proc folder\n");
                /* If we fail to create the procfs, clean up */
                kfd_procfs_shutdown();
        }
}

void kfd_procfs_shutdown(void)
{
        if (procfs.kobj) {
                kobject_del(procfs.kobj);
                kobject_put(procfs.kobj);
                procfs.kobj = NULL;
        }
}

int kfd_process_create_wq(void)
{
        if (!kfd_process_wq)
                kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
        if (!kfd_restore_wq)
                kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

        if (!kfd_process_wq || !kfd_restore_wq) {
                kfd_process_destroy_wq();
                return -ENOMEM;
        }

        return 0;
}

void kfd_process_destroy_wq(void)
{
        if (kfd_process_wq) {
                destroy_workqueue(kfd_process_wq);
                kfd_process_wq = NULL;
        }
        if (kfd_restore_wq) {
                destroy_workqueue(kfd_restore_wq);
                kfd_restore_wq = NULL;
        }
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
                        struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;

        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *      This function should only be called right after the process
 *      is created, while kfd_processes_mutex is still held, to avoid
 *      concurrency. Because of that exclusiveness, we do not need to
 *      take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
                                   uint64_t gpu_va, uint32_t size,
                                   uint32_t flags, void **kptr)
{
        struct kfd_dev *kdev = pdd->dev;
        struct kgd_mem *mem = NULL;
        int handle;
        int err;

        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
                                                 pdd->vm, &mem, NULL, flags);
        if (err)
                goto err_alloc_mem;

        err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
        if (err)
                goto err_map_mem;

        err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
        if (err) {
                pr_debug("Sync memory failed, wait interrupted by user signal\n");
                goto sync_memory_failed;
        }

        /* Create an obj handle so kfd_process_device_remove_obj_handle
         * will take care of the BO removal when the process finishes.
         * We do not need to take p->mutex, because the process is just
         * created and the ioctls have not had the chance to run.
         */
        handle = kfd_process_device_create_obj_handle(pdd, mem);
        if (handle < 0) {
                err = handle;
                goto free_gpuvm;
        }

        if (kptr) {
                err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
                                (struct kgd_mem *)mem, kptr, NULL);
                if (err) {
                        pr_debug("Map GTT BO to kernel failed\n");
                        goto free_obj_handle;
                }
        }

        return err;

free_obj_handle:
        kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
        kfd_process_free_gpuvm(mem, pdd);
        return err;

err_map_mem:
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
        /* A kernel mapping is optional; only clear *kptr if one was asked for */
        if (kptr)
                *kptr = NULL;
        return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *      process for IB usage. The memory reserved is for KFD to submit
 *      IBs to AMDGPU from kernel. If the memory is reserved
 *      successfully, ib_kaddr will have the CPU/kernel
 *      address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                         ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
                         ALLOC_MEM_FLAGS_WRITABLE |
                         ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (qpd->ib_kaddr || !qpd->ib_base)
                return 0;

        /* ib_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
                                      &kaddr);
        if (ret)
                return ret;

        qpd->ib_kaddr = kaddr;

        return 0;
}

struct kfd_process *kfd_create_process(struct file *filep)
{
        struct kfd_process *process;
        struct task_struct *thread = current;
        int ret;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        /*
         * Take the kfd processes mutex before starting process creation
         * so there won't be a case where two threads of the same process
         * create two kfd_process structures.
         */
        mutex_lock(&kfd_processes_mutex);

        /* A prior open of /dev/kfd could have already created the process. */
        process = find_process(thread);
        if (process) {
                pr_debug("Process already found\n");
        } else {
                process = create_process(thread, filep);
                if (IS_ERR(process))
                        goto out;

                if (!procfs.kobj)
                        goto out;

                process->kobj = kfd_alloc_struct(process->kobj);
                if (!process->kobj) {
                        pr_warn("Creating procfs kobject failed\n");
                        goto out;
                }
                ret = kobject_init_and_add(process->kobj, &procfs_type,
                                           procfs.kobj, "%d",
                                           (int)process->lead_thread->pid);
                if (ret) {
                        pr_warn("Creating procfs pid directory failed\n");
                        goto out;
                }

                process->attr_pasid.name = "pasid";
                process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
                sysfs_attr_init(&process->attr_pasid);
                ret = sysfs_create_file(process->kobj, &process->attr_pasid);
                if (ret)
                        pr_warn("Creating pasid for pid %d failed\n",
                                        (int)process->lead_thread->pid);
        }
out:
        mutex_unlock(&kfd_processes_mutex);

        return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
        struct kfd_process *process;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        process = find_process(thread);
        if (!process)
                return ERR_PTR(-EINVAL);

        return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *process;

        hash_for_each_possible_rcu(kfd_processes_table, process,
                                        kfd_processes, (uintptr_t)mm)
                if (process->mm == mm)
                        return process;

        return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
        struct kfd_process *p;
        int idx;

        idx = srcu_read_lock(&kfd_processes_srcu);
        p = find_process_by_mm(thread->mm);
        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}

void kfd_unref_process(struct kfd_process *p)
{
        kref_put(&p->ref, kfd_process_ref_release);
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
        struct kfd_process *p = pdd->process;
        void *mem;
        int id;

        /*
         * Remove all handles from the idr and release the appropriate
         * local memory object
         */
        idr_for_each_entry(&pdd->alloc_idr, mem, id) {
                struct kfd_process_device *peer_pdd;

                list_for_each_entry(peer_pdd, &p->per_device_data,
                                    per_device_list) {
                        if (!peer_pdd->vm)
                                continue;
                        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                                peer_pdd->dev->kgd, mem, peer_pdd->vm);
                }

                amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
                kfd_process_device_remove_obj_handle(pdd, id);
        }
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                kfd_process_device_free_bos(pdd);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
        struct kfd_process_device *pdd, *temp;

        list_for_each_entry_safe(pdd, temp, &p->per_device_data,
                                 per_device_list) {
                pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
                                pdd->dev->id, p->pasid);

                if (pdd->drm_file) {
                        amdgpu_amdkfd_gpuvm_release_process_vm(
                                        pdd->dev->kgd, pdd->vm);
                        fput(pdd->drm_file);
                } else if (pdd->vm) {
                        amdgpu_amdkfd_gpuvm_destroy_process_vm(
                                pdd->dev->kgd, pdd->vm);
                }

                list_del(&pdd->per_device_list);

                if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
                        free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
                                get_order(KFD_CWSR_TBA_TMA_SIZE));

                kfree(pdd->qpd.doorbell_bitmap);
                idr_destroy(&pdd->alloc_idr);

                kfree(pdd);
        }
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);

        /* Remove the procfs files */
        if (p->kobj) {
                sysfs_remove_file(p->kobj, &p->attr_pasid);
                kobject_del(p->kobj);
                kobject_put(p->kobj);
                p->kobj = NULL;
        }

        kfd_iommu_unbind_process(p);

        kfd_process_free_outstanding_kfd_bos(p);

        kfd_process_destroy_pdds(p);
        dma_fence_put(p->ef);

        kfd_event_free_process(p);

        kfd_pasid_free(p->pasid);
        kfd_free_process_doorbells(p);

        mutex_destroy(&p->mutex);

        put_task_struct(p->lead_thread);

        kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
        struct kfd_process *p = container_of(ref, struct kfd_process, ref);

        INIT_WORK(&p->release_work, kfd_process_wq_release);
        queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
        struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

        kfd_unref_process(p);
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
{
        struct kfd_process *p;
        struct kfd_process_device *pdd = NULL;

        /*
         * The kfd_process structure cannot be freed because the
         * mmu_notifier srcu is read-locked
         */
        p = container_of(mn, struct kfd_process, mmu_notifier);
        if (WARN_ON(p->mm != mm))
                return;

        mutex_lock(&kfd_processes_mutex);
        hash_del_rcu(&p->kfd_processes);
        mutex_unlock(&kfd_processes_mutex);
        synchronize_srcu(&kfd_processes_srcu);

        cancel_delayed_work_sync(&p->eviction_work);
        cancel_delayed_work_sync(&p->restore_work);

        mutex_lock(&p->mutex);

        /* Iterate over all process device data structures and if the
         * pdd is in debug mode, we should first force unregistration,
         * then we will be able to destroy the queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;

                mutex_lock(kfd_get_dbgmgr_mutex());
                if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
                        if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
                                kfd_dbgmgr_destroy(dev->dbgmgr);
                                dev->dbgmgr = NULL;
                        }
                }
                mutex_unlock(kfd_get_dbgmgr_mutex());
        }

        kfd_process_dequeue_from_all_devices(p);
        pqm_uninit(&p->pqm);

        /* Indicate to other users that MM is no longer valid */
        p->mm = NULL;

        mutex_unlock(&p->mutex);

        mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
        mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
        .release = kfd_process_notifier_release,
};

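/*
 * CWSR (compute wave save/restore) setup. The trap handler ISA is
 * mapped into the process address space so waves can be preempted
 * mid-execution: tba_addr (trap base address) points at the handler
 * code and tma_addr (trap memory address) at its scratch area,
 * KFD_CWSR_TMA_OFFSET bytes further in. On APUs the buffer comes from
 * kernel pages mmapped into the process (below); on dGPUs it lives in
 * GTT memory (see kfd_process_device_init_cwsr_dgpu()).
 */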
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
        unsigned long offset;
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;
                struct qcm_process_device *qpd = &pdd->qpd;

                if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
                        continue;

                offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
                        << PAGE_SHIFT;
                qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
                        KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
                        MAP_SHARED, offset);

                if (IS_ERR_VALUE(qpd->tba_addr)) {
                        int err = qpd->tba_addr;

                        pr_err("Failure to set tba address. error %d.\n", err);
                        qpd->tba_addr = 0;
                        qpd->cwsr_kaddr = NULL;
                        return err;
                }

                memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

                qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
                pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                        qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
        }

        return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
                return 0;

        /* cwsr_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
                                      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
        if (ret)
                return ret;

        qpd->cwsr_kaddr = kaddr;
        qpd->tba_addr = qpd->cwsr_base;

        memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

        qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
        pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

        return 0;
}

static struct kfd_process *create_process(const struct task_struct *thread,
                                        struct file *filep)
{
        struct kfd_process *process;
        int err = -ENOMEM;

        process = kzalloc(sizeof(*process), GFP_KERNEL);
        if (!process)
                goto err_alloc_process;

        process->pasid = kfd_pasid_alloc();
        if (process->pasid == 0)
                goto err_alloc_pasid;

        if (kfd_alloc_process_doorbells(process) < 0)
                goto err_alloc_doorbells;

        kref_init(&process->ref);

        mutex_init(&process->mutex);

        process->mm = thread->mm;

        /* Register the MMU notifier */
        process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
        err = mmu_notifier_register(&process->mmu_notifier, process->mm);
        if (err)
                goto err_mmu_notifier;

        hash_add_rcu(kfd_processes_table, &process->kfd_processes,
                        (uintptr_t)process->mm);

        process->lead_thread = thread->group_leader;
        get_task_struct(process->lead_thread);

        INIT_LIST_HEAD(&process->per_device_data);

        kfd_event_init_process(process);

        err = pqm_init(&process->pqm, process);
        if (err != 0)
                goto err_process_pqm_init;

        /* Init process apertures */
        process->is_32bit_user_mode = in_compat_syscall();
        err = kfd_init_apertures(process);
        if (err != 0)
                goto err_init_apertures;

        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();

        err = kfd_process_init_cwsr_apu(process, filep);
        if (err)
                goto err_init_cwsr;

        return process;

err_init_cwsr:
        kfd_process_free_outstanding_kfd_bos(process);
        kfd_process_destroy_pdds(process);
err_init_apertures:
        pqm_uninit(&process->pqm);
err_process_pqm_init:
        hash_del_rcu(&process->kfd_processes);
        synchronize_rcu();
        mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
        mutex_destroy(&process->mutex);
        kfd_free_process_doorbells(process);
err_alloc_doorbells:
        kfd_pasid_free(process->pasid);
err_alloc_pasid:
        kfree(process);
err_alloc_process:
        return ERR_PTR(err);
}

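/*
 * init_doorbell_bitmap - Mark doorbell indices the process may not use.
 *
 * A set bit means "reserved". As a hypothetical example, if firmware
 * reported non_cp_doorbells_start = 0x060 and non_cp_doorbells_end =
 * 0x07f, then bits 0x060-0x07f and the mirrored range starting at
 * 0x060 + KFD_QUEUE_DOORBELL_MIRROR_OFFSET would be set, so user queue
 * creation skips those doorbells.
 */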
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
                        struct kfd_dev *dev)
{
        unsigned int i;

        if (!KFD_IS_SOC15(dev->device_info->asic_family))
                return 0;

        qpd->doorbell_bitmap =
                kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                     BITS_PER_BYTE), GFP_KERNEL);
        if (!qpd->doorbell_bitmap)
                return -ENOMEM;

        /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
        for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
                if (i >= dev->shared_resources.non_cp_doorbells_start
                        && i <= dev->shared_resources.non_cp_doorbells_end) {
                        set_bit(i, qpd->doorbell_bitmap);
                        set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
                                qpd->doorbell_bitmap);
                        pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
                                i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
                }
        }

        return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                if (pdd->dev == dev)
                        return pdd;

        return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
        if (!pdd)
                return NULL;

        if (init_doorbell_bitmap(&pdd->qpd, dev)) {
                pr_err("Failed to init doorbell for process\n");
                kfree(pdd);
                return NULL;
        }

        pdd->dev = dev;
        INIT_LIST_HEAD(&pdd->qpd.queues_list);
        INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
        pdd->qpd.dqm = dev->dqm;
        pdd->qpd.pqm = &p->pqm;
        pdd->qpd.evicted = 0;
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
        list_add(&pdd->per_device_list, &p->per_device_data);

        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);

        return pdd;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file)
{
        struct kfd_process *p;
        struct kfd_dev *dev;
        int ret;

        if (pdd->vm)
                return drm_file ? -EBUSY : 0;

        p = pdd->process;
        dev = pdd->dev;

        if (drm_file)
                ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
                        dev->kgd, drm_file, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        else
                ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
        }

        ret = kfd_process_device_reserve_ib_mem(pdd);
        if (ret)
                goto err_reserve_ib_mem;
        ret = kfd_process_device_init_cwsr_dgpu(pdd);
        if (ret)
                goto err_init_cwsr;

        pdd->drm_file = drm_file;

        return 0;

err_init_cwsr:
err_reserve_ib_mem:
        kfd_process_device_free_bos(pdd);
        if (!drm_file)
                amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
        pdd->vm = NULL;

        return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
                                                        struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int err;

        pdd = kfd_get_process_device_data(dev, p);
        if (!pdd) {
                pr_err("Process device data doesn't exist\n");
                return ERR_PTR(-ENOMEM);
        }

        err = kfd_iommu_bind_process_to_device(pdd);
        if (err)
                return ERR_PTR(err);

        err = kfd_process_device_init_vm(pdd, NULL);
        if (err)
                return ERR_PTR(err);

        return pdd;
}

struct kfd_process_device *kfd_get_first_process_device_data(
                                                struct kfd_process *p)
{
        return list_first_entry(&p->per_device_data,
                                struct kfd_process_device,
                                per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
                                                struct kfd_process *p,
                                                struct kfd_process_device *pdd)
{
        if (list_is_last(&pdd->per_device_list, &p->per_device_data))
                return NULL;
        return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
        return !list_empty(&p->per_device_data);
}

/* Create a specific handle mapped to mem from the process-local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
                                        void *mem)
{
        return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate a specific handle from the process-local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
                                        int handle)
{
        if (handle < 0)
                return NULL;

        return idr_find(&pdd->alloc_idr, handle);
}

/* Remove a specific handle from the process-local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                        int handle)
{
        if (handle >= 0)
                idr_remove(&pdd->alloc_idr, handle);
}
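/*
 * Together, the three helpers above give each process-device a private
 * handle space for its buffer objects. The typical lifecycle, as used
 * by kfd_process_alloc_gpuvm() and kfd_process_device_free_bos():
 *
 *      handle = kfd_process_device_create_obj_handle(pdd, mem);
 *      ...
 *      mem = kfd_process_device_translate_handle(pdd, handle);
 *      ...
 *      kfd_process_device_remove_obj_handle(pdd, handle);
 */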

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
        struct kfd_process *p, *ret_p = NULL;
        unsigned int temp;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (p->pasid == pasid) {
                        kref_get(&p->ref);
                        ret_p = p;
                        break;
                }
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *p;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        p = find_process_by_mm(mm);
        if (p)
                kref_get(&p->ref);

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r = 0;
        unsigned int n_evicted = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
                                                            &pdd->qpd);
                if (r) {
                        pr_err("Failed to evict process queues\n");
                        goto fail;
                }
                n_evicted++;
        }

        return r;

fail:
        /* To keep state consistent, roll back partial eviction by
         * restoring queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                if (n_evicted == 0)
                        break;
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd))
                        pr_err("Failed to restore queues\n");

                n_evicted--;
        }

        return r;
}
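/*
 * Sketch of the nesting this allows (assuming the DQM ops count
 * evictions in qpd->evicted, which kfd_create_process_device_data()
 * initializes to 0): an MMU-notifier eviction and
 * kfd_suspend_all_processes() may overlap, and the queues only run
 * again once every evict call has been matched by a restore call.
 */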

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r, ret = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
                if (r) {
                        pr_err("Failed to restore process queues\n");
                        if (!ret)
                                ret = r;
                }
        }

        return ret;
}

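/*
 * Overview of the eviction/restore cycle implemented below:
 * eviction_work (queued when the eviction fence is triggered on the
 * amdgpu side) evicts all user queues, signals and drops the fence,
 * and schedules restore_work after PROCESS_RESTORE_TIME_MS.
 * restore_work revalidates the process BOs, which installs a fresh
 * eviction fence in p->ef, then restores the queues, retrying after
 * PROCESS_BACK_OFF_TIME_MS if not enough memory is available yet.
 */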
static void evict_process_worker(struct work_struct *work)
{
        int ret;
        struct kfd_process *p;
        struct delayed_work *dwork;

        dwork = to_delayed_work(work);

        /* Process termination destroys this worker thread. So during the
         * lifetime of this thread, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, eviction_work);
        WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
                  "Eviction fence mismatch\n");

        /* A narrow window of overlap between restore and evict work
         * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
         * unreserves KFD BOs, it is possible to be evicted again. But
         * restore has a few more steps to finish. So let's wait for any
         * previous restore work to complete.
         */
        flush_delayed_work(&p->restore_work);

        pr_debug("Started evicting pasid %d\n", p->pasid);
        ret = kfd_process_evict_queues(p);
        if (!ret) {
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
                queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

                pr_debug("Finished evicting pasid %d\n", p->pasid);
        } else {
                pr_err("Failed to evict queues of pasid %d\n", p->pasid);
        }
}

static void restore_process_worker(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct kfd_process *p;
        struct kfd_process_device *pdd;
        int ret = 0;

        dwork = to_delayed_work(work);

        /* Process termination destroys this worker thread. So during the
         * lifetime of this thread, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, restore_work);

        /* Call restore_process_bos on the first KGD device. This function
         * takes care of restoring the whole process including other devices.
         * Restore can fail if enough memory is not available. If so,
         * reschedule again.
         */
        pdd = list_first_entry(&p->per_device_data,
                               struct kfd_process_device,
                               per_device_list);

        pr_debug("Started restoring pasid %d\n", p->pasid);

        /* Set last_restore_timestamp before attempting the restore.
         * Otherwise it would have to be set by KGD (restore_process_bos)
         * before KFD BOs are unreserved. If not, the process could be
         * evicted again before the timestamp is set.
         * If restore fails, the timestamp is set again on the next
         * attempt. This means the minimum GPU quantum is
         * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
         * following two functions.
         */
        p->last_restore_timestamp = get_jiffies_64();
        ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
                                                     &p->ef);
        if (ret) {
                pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
                         p->pasid, PROCESS_BACK_OFF_TIME_MS);
                ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
                WARN(!ret, "reschedule restore work failed\n");
                return;
        }

        ret = kfd_process_restore_queues(p);
        if (!ret)
                pr_debug("Finished restoring pasid %d\n", p->pasid);
        else
                pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                cancel_delayed_work_sync(&p->eviction_work);
                cancel_delayed_work_sync(&p->restore_work);

                if (kfd_process_evict_queues(p))
                        pr_err("Failed to suspend process %d\n", p->pasid);
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
                        pr_err("Restore process %d failed during resume\n",
                               p->pasid);
                        ret = -EFAULT;
                }
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
        return ret;
}

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
                          struct vm_area_struct *vma)
{
        struct kfd_process_device *pdd;
        struct qcm_process_device *qpd;

        if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
                pr_err("Incorrect CWSR mapping size.\n");
                return -EINVAL;
        }

        pdd = kfd_get_process_device_data(dev, process);
        if (!pdd)
                return -EINVAL;
        qpd = &pdd->qpd;

        qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(KFD_CWSR_TBA_TMA_SIZE));
        if (!qpd->cwsr_kaddr) {
                pr_err("Error allocating per process CWSR buffer.\n");
                return -ENOMEM;
        }

        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
                | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
        /* Mapping pages to user process */
        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(qpd->cwsr_kaddr)),
                               KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

void kfd_flush_tlb(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

        if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
                /* Nothing to flush until a VMID is assigned, which
                 * only happens when the first queue is created.
                 */
                if (pdd->qpd.vmid)
                        f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
        } else {
                f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
        }
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
        struct kfd_process *p;
        unsigned int temp;
        int r = 0;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                seq_printf(m, "Process %d PASID %d:\n",
                           p->lead_thread->tgid, p->pasid);

                mutex_lock(&p->mutex);
                r = pqm_debugfs_mqds(m, &p->pqm);
                mutex_unlock(&p->mutex);

                if (r)
                        break;
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return r;
}

#endif