Linux-libre 5.3.12-gnu
drivers/gpu/drm/scheduler/sched_main.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have priorities among them. The scheduler selects entities
30  * from the run queue in FIFO order. The scheduler also provides dependency
31  * handling between jobs. The driver is expected to provide callback functions
32  * for backend operations to the scheduler, such as submitting a job to the
33  * hardware run queue or returning the dependencies of a job.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  */
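
/*
 * Illustrative sketch (not from this file; the "my_" names and "ring" are
 * hypothetical): a driver hooks into the scheduler by filling in struct
 * drm_sched_backend_ops and creating one scheduler per hardware run queue,
 * typically at ring initialization time.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		// Hand the job to the hardware ring, return its hardware fence.
 *		return my_hw_submit(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_job_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_job_timedout,
 *		.free_job	= my_job_free,
 *	};
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops, num_hw_submissions,
 *			   hang_limit, msecs_to_jiffies(timeout_ms), ring->name);
 */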
46
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <uapi/linux/sched/types.h>
51 #include <drm/drmP.h>
52 #include <drm/gpu_scheduler.h>
53 #include <drm/spsc_queue.h>
54
55 #define CREATE_TRACE_POINTS
56 #include "gpu_scheduler_trace.h"
57
58 #define to_drm_sched_job(sched_job)             \
59                 container_of((sched_job), struct drm_sched_job, queue_node)
60
61 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
62
63 /**
64  * drm_sched_rq_init - initialize a given run queue struct
65  * @sched: scheduler instance to which the run queue belongs
66  * @rq: scheduler run queue
67  *
68  * Initializes a scheduler runqueue.
69  */
70 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
71                               struct drm_sched_rq *rq)
72 {
73         spin_lock_init(&rq->lock);
74         INIT_LIST_HEAD(&rq->entities);
75         rq->current_entity = NULL;
76         rq->sched = sched;
77 }
78
79 /**
80  * drm_sched_rq_add_entity - add an entity
81  *
82  * @rq: scheduler run queue
83  * @entity: scheduler entity
84  *
85  * Adds a scheduler entity to the run queue.
86  */
87 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
88                              struct drm_sched_entity *entity)
89 {
90         if (!list_empty(&entity->list))
91                 return;
92         spin_lock(&rq->lock);
93         list_add_tail(&entity->list, &rq->entities);
94         spin_unlock(&rq->lock);
95 }
96
97 /**
98  * drm_sched_rq_remove_entity - remove an entity
99  *
100  * @rq: scheduler run queue
101  * @entity: scheduler entity
102  *
103  * Removes a scheduler entity from the run queue.
104  */
105 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
106                                 struct drm_sched_entity *entity)
107 {
108         if (list_empty(&entity->list))
109                 return;
110         spin_lock(&rq->lock);
111         list_del_init(&entity->list);
112         if (rq->current_entity == entity)
113                 rq->current_entity = NULL;
114         spin_unlock(&rq->lock);
115 }
116
117 /**
118  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
119  *
120  * @rq: scheduler run queue to check.
121  *
122  * Try to find a ready entity; returns NULL if none is found.
123  */
124 static struct drm_sched_entity *
125 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
126 {
127         struct drm_sched_entity *entity;
128
129         spin_lock(&rq->lock);
130
131         entity = rq->current_entity;
132         if (entity) {
133                 list_for_each_entry_continue(entity, &rq->entities, list) {
134                         if (drm_sched_entity_is_ready(entity)) {
135                                 rq->current_entity = entity;
136                                 spin_unlock(&rq->lock);
137                                 return entity;
138                         }
139                 }
140         }
141
142         list_for_each_entry(entity, &rq->entities, list) {
143
144                 if (drm_sched_entity_is_ready(entity)) {
145                         rq->current_entity = entity;
146                         spin_unlock(&rq->lock);
147                         return entity;
148                 }
149
150                 if (entity == rq->current_entity)
151                         break;
152         }
153
154         spin_unlock(&rq->lock);
155
156         return NULL;
157 }
158
159 /**
160  * drm_sched_dependency_optimized
161  *
162  * @fence: the dependency fence
163  * @entity: the entity which depends on the above fence
164  *
165  * Returns true if the dependency can be optimized and false otherwise
166  */
167 bool drm_sched_dependency_optimized(struct dma_fence *fence,
168                                     struct drm_sched_entity *entity)
169 {
170         struct drm_gpu_scheduler *sched = entity->rq->sched;
171         struct drm_sched_fence *s_fence;
172
173         if (!fence || dma_fence_is_signaled(fence))
174                 return false;
175         if (fence->context == entity->fence_context)
176                 return true;
177         s_fence = to_drm_sched_fence(fence);
178         if (s_fence && s_fence->sched == sched)
179                 return true;
180
181         return false;
182 }
183 EXPORT_SYMBOL(drm_sched_dependency_optimized);
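
/*
 * Illustrative sketch (not from this file; the helper name is hypothetical):
 * a driver can use drm_sched_dependency_optimized() while building a command
 * stream to skip an explicit hardware synchronization for a dependency that
 * the scheduler will already order correctly.
 *
 *	static bool my_need_pipeline_sync(struct dma_fence *dep,
 *					  struct drm_sched_entity *entity)
 *	{
 *		// The scheduler serializes these jobs itself, no HW sync needed.
 *		if (drm_sched_dependency_optimized(dep, entity))
 *			return false;
 *
 *		return true;
 *	}
 */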
184
185 /**
186  * drm_sched_start_timeout - start timeout for reset worker
187  *
188  * @sched: scheduler instance to start the worker for
189  *
190  * Start the timeout for the given scheduler.
191  */
192 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
193 {
194         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
195             !list_empty(&sched->ring_mirror_list))
196                 schedule_delayed_work(&sched->work_tdr, sched->timeout);
197 }
198
199 /**
200  * drm_sched_fault - immediately start timeout handler
201  *
202  * @sched: scheduler where the timeout handling should be started.
203  *
204  * Start timeout handling immediately when the driver detects a hardware fault.
205  */
206 void drm_sched_fault(struct drm_gpu_scheduler *sched)
207 {
208         mod_delayed_work(system_wq, &sched->work_tdr, 0);
209 }
210 EXPORT_SYMBOL(drm_sched_fault);
211
212 /**
213  * drm_sched_suspend_timeout - Suspend scheduler job timeout
214  *
215  * @sched: scheduler instance for which to suspend the timeout
216  *
217  * Suspend the delayed work timeout for the scheduler. This is done by
218  * modifying the delayed work timeout to an arbitrarily large value,
219  * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
220  * called from an IRQ context.
221  *
222  * Returns the timeout remaining
223  *
224  */
225 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
226 {
227         unsigned long sched_timeout, now = jiffies;
228
229         sched_timeout = sched->work_tdr.timer.expires;
230
231         /*
232          * Modify the timeout to an arbitrarily large value. This also prevents
233          * the timeout from being restarted when new submissions arrive.
234          */
235         if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
236                         && time_after(sched_timeout, now))
237                 return sched_timeout - now;
238         else
239                 return sched->timeout;
240 }
241 EXPORT_SYMBOL(drm_sched_suspend_timeout);
242
243 /**
244  * drm_sched_resume_timeout - Resume scheduler job timeout
245  *
246  * @sched: scheduler instance for which to resume the timeout
247  * @remaining: remaining timeout
248  *
249  * Resume the delayed work timeout for the scheduler. Note that
250  * this function can be called from an IRQ context.
251  */
252 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
253                 unsigned long remaining)
254 {
255         unsigned long flags;
256
257         spin_lock_irqsave(&sched->job_list_lock, flags);
258
259         if (list_empty(&sched->ring_mirror_list))
260                 cancel_delayed_work(&sched->work_tdr);
261         else
262                 mod_delayed_work(system_wq, &sched->work_tdr, remaining);
263
264         spin_unlock_irqrestore(&sched->job_list_lock, flags);
265 }
266 EXPORT_SYMBOL(drm_sched_resume_timeout);
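
/*
 * Illustrative sketch (not from this file; the ring and helper names are
 * hypothetical): the suspend/resume helpers above are meant to be used as a
 * pair, e.g. while the driver temporarily takes the hardware away from the
 * scheduler and wants to keep the remaining timeout budget.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&ring->sched);
 *	my_ring_do_preempted_work(ring);
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */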
267
268 static void drm_sched_job_begin(struct drm_sched_job *s_job)
269 {
270         struct drm_gpu_scheduler *sched = s_job->sched;
271         unsigned long flags;
272
273         spin_lock_irqsave(&sched->job_list_lock, flags);
274         list_add_tail(&s_job->node, &sched->ring_mirror_list);
275         drm_sched_start_timeout(sched);
276         spin_unlock_irqrestore(&sched->job_list_lock, flags);
277 }
278
279 static void drm_sched_job_timedout(struct work_struct *work)
280 {
281         struct drm_gpu_scheduler *sched;
282         struct drm_sched_job *job;
283         unsigned long flags;
284
285         sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
286         job = list_first_entry_or_null(&sched->ring_mirror_list,
287                                        struct drm_sched_job, node);
288
289         if (job) {
290                 job->sched->ops->timedout_job(job);
291
292                 /*
293                  * The guilty job already completed, so it has to be freed
294                  * manually here; see the drm_sched_stop() documentation.
295                  */
296                 if (sched->free_guilty) {
297                         job->sched->ops->free_job(job);
298                         sched->free_guilty = false;
299                 }
300         }
301
302         spin_lock_irqsave(&sched->job_list_lock, flags);
303         drm_sched_start_timeout(sched);
304         spin_unlock_irqrestore(&sched->job_list_lock, flags);
305 }
306
307 /**
308  * drm_sched_increase_karma - Update sched_entity guilty flag
309  *
310  * @bad: The job guilty of the timeout
311  *
312  * Increments the karma on every hang caused by the 'bad' job. If this exceeds
313  * the hang limit of the scheduler, the respective sched entity is marked
314  * guilty and no further jobs from it will be scheduled.
315  */
316 void drm_sched_increase_karma(struct drm_sched_job *bad)
317 {
318         int i;
319         struct drm_sched_entity *tmp;
320         struct drm_sched_entity *entity;
321         struct drm_gpu_scheduler *sched = bad->sched;
322
323         /* don't increase @bad's karma if it's from the KERNEL RQ,
324          * because a GPU hang can also corrupt kernel jobs (like VM updating
325          * jobs), but kernel jobs are always considered good.
326          */
327         if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
328                 atomic_inc(&bad->karma);
329                 for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
330                      i++) {
331                         struct drm_sched_rq *rq = &sched->sched_rq[i];
332
333                         spin_lock(&rq->lock);
334                         list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
335                                 if (bad->s_fence->scheduled.context ==
336                                     entity->fence_context) {
337                                         if (atomic_read(&bad->karma) >
338                                             bad->sched->hang_limit)
339                                                 if (entity->guilty)
340                                                         atomic_set(entity->guilty, 1);
341                                         break;
342                                 }
343                         }
344                         spin_unlock(&rq->lock);
345                         if (&entity->list != &rq->entities)
346                                 break;
347                 }
348         }
349 }
350 EXPORT_SYMBOL(drm_sched_increase_karma);
351
352 /**
353  * drm_sched_stop - stop the scheduler
354  *
355  * @sched: scheduler instance
356  * @bad: job which caused the time out
357  *
358  * Stop the scheduler and also remove and free all completed jobs.
359  * Note: the bad job will not be freed, as it might be used later; it is
360  * the caller's responsibility to release it manually if it is no longer
361  * part of the mirror list.
362  *
363  */
364 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
365 {
366         struct drm_sched_job *s_job, *tmp;
367         unsigned long flags;
368
369         kthread_park(sched->thread);
370
371         /*
372          * Iterate the job list from the last to the first submitted job and
373          * either deactivate their HW callbacks or remove them from the mirror
374          * list if they have already signaled.
375          * This iteration is thread safe as the sched thread is stopped.
376          */
377         list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
378                 if (s_job->s_fence->parent &&
379                     dma_fence_remove_callback(s_job->s_fence->parent,
380                                               &s_job->cb)) {
381                         atomic_dec(&sched->hw_rq_count);
382                 } else {
383                         /*
384                          * remove job from ring_mirror_list.
385                          * Locking here is for concurrent resume timeout
386                          */
387                         spin_lock_irqsave(&sched->job_list_lock, flags);
388                         list_del_init(&s_job->node);
389                         spin_unlock_irqrestore(&sched->job_list_lock, flags);
390
391                         /*
392                          * Wait for job's HW fence callback to finish using s_job
393                          * before releasing it.
394                          *
395                          * The job is still alive, so the fence refcount is at least 1.
396                          */
397                         dma_fence_wait(&s_job->s_fence->finished, false);
398
399                         /*
400                          * We must keep the bad job alive for later use during
401                          * recovery by some of the drivers, but leave a hint
402                          * that the guilty job must be released.
403                          */
404                         if (bad != s_job)
405                                 sched->ops->free_job(s_job);
406                         else
407                                 sched->free_guilty = true;
408                 }
409         }
410
411         /*
412          * Stop the pending timer in flight, as we rearm it in drm_sched_start().
413          * This prevents the pending timeout work from firing right away after
414          * this TDR finished and before the newly restarted jobs have had a
415          * chance to complete.
416          */
417         cancel_delayed_work(&sched->work_tdr);
418 }
420 EXPORT_SYMBOL(drm_sched_stop);
421
422 /**
423  * drm_sched_start - recover jobs after a reset
424  *
425  * @sched: scheduler instance
426  * @full_recovery: proceed with complete sched restart
427  *
428  */
429 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
430 {
431         struct drm_sched_job *s_job, *tmp;
432         unsigned long flags;
433         int r;
434
435         /*
436          * Locking the list is not required here as the sched thread is parked
437          * so no new jobs are being inserted or removed. Also concurrent
438          * GPU recovers can't run in parallel.
439          */
440         list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
441                 struct dma_fence *fence = s_job->s_fence->parent;
442
443                 atomic_inc(&sched->hw_rq_count);
444
445                 if (!full_recovery)
446                         continue;
447
448                 if (fence) {
449                         r = dma_fence_add_callback(fence, &s_job->cb,
450                                                    drm_sched_process_job);
451                         if (r == -ENOENT)
452                                 drm_sched_process_job(fence, &s_job->cb);
453                         else if (r)
454                                 DRM_ERROR("fence add callback failed (%d)\n",
455                                           r);
456                 } else
457                         drm_sched_process_job(NULL, &s_job->cb);
458         }
459
460         if (full_recovery) {
461                 spin_lock_irqsave(&sched->job_list_lock, flags);
462                 drm_sched_start_timeout(sched);
463                 spin_unlock_irqrestore(&sched->job_list_lock, flags);
464         }
465
466         kthread_unpark(sched->thread);
467 }
468 EXPORT_SYMBOL(drm_sched_start);
469
470 /**
471  * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
472  *
473  * @sched: scheduler instance
474  *
475  */
476 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
477 {
478         struct drm_sched_job *s_job, *tmp;
479         uint64_t guilty_context;
480         bool found_guilty = false;
481         struct dma_fence *fence;
482
483         list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
484                 struct drm_sched_fence *s_fence = s_job->s_fence;
485
486                 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
487                         found_guilty = true;
488                         guilty_context = s_job->s_fence->scheduled.context;
489                 }
490
491                 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
492                         dma_fence_set_error(&s_fence->finished, -ECANCELED);
493
494                 dma_fence_put(s_job->s_fence->parent);
495                 fence = sched->ops->run_job(s_job);
496
497                 if (IS_ERR_OR_NULL(fence)) {
498                         s_job->s_fence->parent = NULL;
499                         dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
500                 } else {
501                         s_job->s_fence->parent = fence;
502                 }
503
505         }
506 }
507 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
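
/*
 * Illustrative sketch (not from this file; the "my_" names are hypothetical):
 * a driver's timedout_job callback typically chains the recovery helpers
 * above in the following order.
 *
 *	static void my_job_timedout(struct drm_sched_job *sched_job)
 *	{
 *		struct my_ring *ring = to_my_ring(sched_job->sched);
 *
 *		// Park the scheduler thread and prune completed jobs.
 *		drm_sched_stop(&ring->sched, sched_job);
 *
 *		// Mark the offending entity guilty once the hang limit is hit.
 *		drm_sched_increase_karma(sched_job);
 *
 *		my_hw_reset(ring);
 *
 *		// Re-run the remaining jobs and restart the scheduler.
 *		drm_sched_resubmit_jobs(&ring->sched);
 *		drm_sched_start(&ring->sched, true);
 *	}
 */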
508
509 /**
510  * drm_sched_job_init - init a scheduler job
511  *
512  * @job: scheduler job to init
513  * @entity: scheduler entity to use
514  * @owner: job owner for debugging
515  *
516  * Refer to drm_sched_entity_push_job() documentation
517  * for locking considerations.
518  *
519  * Returns 0 for success, negative error code otherwise.
520  */
521 int drm_sched_job_init(struct drm_sched_job *job,
522                        struct drm_sched_entity *entity,
523                        void *owner)
524 {
525         struct drm_gpu_scheduler *sched;
526
527         drm_sched_entity_select_rq(entity);
528         if (!entity->rq)
529                 return -ENOENT;
530
531         sched = entity->rq->sched;
532
533         job->sched = sched;
534         job->entity = entity;
535         job->s_priority = entity->rq - sched->sched_rq;
536         job->s_fence = drm_sched_fence_create(entity, owner);
537         if (!job->s_fence)
538                 return -ENOMEM;
539         job->id = atomic64_inc_return(&sched->job_id_count);
540
541         INIT_LIST_HEAD(&job->node);
542
543         return 0;
544 }
545 EXPORT_SYMBOL(drm_sched_job_init);
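
/*
 * Illustrative sketch (not from this file; "my_job" is hypothetical): the
 * usual submission path initializes the job against an entity and then
 * pushes it to the entity's software queue.
 *
 *	r = drm_sched_job_init(&my_job->base, entity, owner);
 *	if (r)
 *		goto err_free_job;
 *
 *	// Hand the job over to the entity; the scheduler may run it any time.
 *	drm_sched_entity_push_job(&my_job->base, entity);
 */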
546
547 /**
548  * drm_sched_job_cleanup - clean up scheduler job resources
549  *
550  * @job: scheduler job to clean up
551  */
552 void drm_sched_job_cleanup(struct drm_sched_job *job)
553 {
554         dma_fence_put(&job->s_fence->finished);
555         job->s_fence = NULL;
556 }
557 EXPORT_SYMBOL(drm_sched_job_cleanup);
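
/*
 * Illustrative sketch (not from this file; the "my_" names are hypothetical):
 * drivers usually call drm_sched_job_cleanup() from their free_job callback
 * once the scheduler fences are no longer needed.
 *
 *	static void my_job_free(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		drm_sched_job_cleanup(sched_job);
 *		my_job_put(job);
 *	}
 */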
558
559 /**
560  * drm_sched_ready - is the scheduler ready
561  *
562  * @sched: scheduler instance
563  *
564  * Return true if we can push more jobs to the hw, otherwise false.
565  */
566 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
567 {
568         return atomic_read(&sched->hw_rq_count) <
569                 sched->hw_submission_limit;
570 }
571
572 /**
573  * drm_sched_wakeup - Wake up the scheduler when it is ready
574  *
575  * @sched: scheduler instance
576  *
577  */
578 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
579 {
580         if (drm_sched_ready(sched))
581                 wake_up_interruptible(&sched->wake_up_worker);
582 }
583
584 /**
585  * drm_sched_select_entity - Select next entity to process
586  *
587  * @sched: scheduler instance
588  *
589  * Returns the entity to process or NULL if none are found.
590  */
591 static struct drm_sched_entity *
592 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
593 {
594         struct drm_sched_entity *entity;
595         int i;
596
597         if (!drm_sched_ready(sched))
598                 return NULL;
599
600         /* Kernel run queue has higher priority than normal run queue */
601         for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
602                 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
603                 if (entity)
604                         break;
605         }
606
607         return entity;
608 }
609
610 /**
611  * drm_sched_process_job - process a job
612  *
613  * @f: fence
614  * @cb: fence callback
615  *
616  * Called after the job has finished execution.
617  */
618 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
619 {
620         struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
621         struct drm_sched_fence *s_fence = s_job->s_fence;
622         struct drm_gpu_scheduler *sched = s_fence->sched;
623
624         atomic_dec(&sched->hw_rq_count);
625         atomic_dec(&sched->num_jobs);
626
627         trace_drm_sched_process_job(s_fence);
628
629         drm_sched_fence_finished(s_fence);
630         wake_up_interruptible(&sched->wake_up_worker);
631 }
632
633 /**
634  * drm_sched_cleanup_jobs - destroy finished jobs
635  *
636  * @sched: scheduler instance
637  *
638  * Remove all finished jobs from the mirror list and destroy them.
639  */
640 static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
641 {
642         unsigned long flags;
643
644         /* Don't destroy jobs while the timeout worker is running */
645         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
646             !cancel_delayed_work(&sched->work_tdr))
647                 return;
648
650         while (!list_empty(&sched->ring_mirror_list)) {
651                 struct drm_sched_job *job;
652
653                 job = list_first_entry(&sched->ring_mirror_list,
654                                        struct drm_sched_job, node);
655                 if (!dma_fence_is_signaled(&job->s_fence->finished))
656                         break;
657
658                 spin_lock_irqsave(&sched->job_list_lock, flags);
659                 /* remove job from ring_mirror_list */
660                 list_del_init(&job->node);
661                 spin_unlock_irqrestore(&sched->job_list_lock, flags);
662
663                 sched->ops->free_job(job);
664         }
665
666         /* queue timeout for next job */
667         spin_lock_irqsave(&sched->job_list_lock, flags);
668         drm_sched_start_timeout(sched);
669         spin_unlock_irqrestore(&sched->job_list_lock, flags);
671 }
672
673 /**
674  * drm_sched_blocked - check if the scheduler is blocked
675  *
676  * @sched: scheduler instance
677  *
678  * Returns true if blocked, otherwise false.
679  */
680 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
681 {
682         if (kthread_should_park()) {
683                 kthread_parkme();
684                 return true;
685         }
686
687         return false;
688 }
689
690 /**
691  * drm_sched_main - main scheduler thread
692  *
693  * @param: scheduler instance
694  *
695  * Returns 0.
696  */
697 static int drm_sched_main(void *param)
698 {
699         struct sched_param sparam = {.sched_priority = 1};
700         struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
701         int r;
702
703         sched_setscheduler(current, SCHED_FIFO, &sparam);
704
705         while (!kthread_should_stop()) {
706                 struct drm_sched_entity *entity = NULL;
707                 struct drm_sched_fence *s_fence;
708                 struct drm_sched_job *sched_job;
709                 struct dma_fence *fence;
710
711                 wait_event_interruptible(sched->wake_up_worker,
712                                          (drm_sched_cleanup_jobs(sched),
713                                          (!drm_sched_blocked(sched) &&
714                                           (entity = drm_sched_select_entity(sched))) ||
715                                          kthread_should_stop()));
716
717                 if (!entity)
718                         continue;
719
720                 sched_job = drm_sched_entity_pop_job(entity);
721                 if (!sched_job)
722                         continue;
723
724                 s_fence = sched_job->s_fence;
725
726                 atomic_inc(&sched->hw_rq_count);
727                 drm_sched_job_begin(sched_job);
728
729                 fence = sched->ops->run_job(sched_job);
730                 drm_sched_fence_scheduled(s_fence);
731
732                 if (!IS_ERR_OR_NULL(fence)) {
733                         s_fence->parent = dma_fence_get(fence);
734                         r = dma_fence_add_callback(fence, &sched_job->cb,
735                                                    drm_sched_process_job);
736                         if (r == -ENOENT)
737                                 drm_sched_process_job(fence, &sched_job->cb);
738                         else if (r)
739                                 DRM_ERROR("fence add callback failed (%d)\n",
740                                           r);
741                         dma_fence_put(fence);
742                 } else {
744                         dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
745                         drm_sched_process_job(NULL, &sched_job->cb);
746                 }
747
748                 wake_up(&sched->job_scheduled);
749         }
750         return 0;
751 }
752
753 /**
754  * drm_sched_init - Init a gpu scheduler instance
755  *
756  * @sched: scheduler instance
757  * @ops: backend operations for this scheduler
758  * @hw_submission: number of hw submissions that can be in flight
759  * @hang_limit: number of times to allow a job to hang before dropping it
760  * @timeout: timeout value in jiffies for the scheduler
761  * @name: name used for debugging
762  *
763  * Return 0 on success, otherwise error code.
764  */
765 int drm_sched_init(struct drm_gpu_scheduler *sched,
766                    const struct drm_sched_backend_ops *ops,
767                    unsigned hw_submission,
768                    unsigned hang_limit,
769                    long timeout,
770                    const char *name)
771 {
772         int i, ret;
773         sched->ops = ops;
774         sched->hw_submission_limit = hw_submission;
775         sched->name = name;
776         sched->timeout = timeout;
777         sched->hang_limit = hang_limit;
778         for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
779                 drm_sched_rq_init(sched, &sched->sched_rq[i]);
780
781         init_waitqueue_head(&sched->wake_up_worker);
782         init_waitqueue_head(&sched->job_scheduled);
783         INIT_LIST_HEAD(&sched->ring_mirror_list);
784         spin_lock_init(&sched->job_list_lock);
785         atomic_set(&sched->hw_rq_count, 0);
786         INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
787         atomic_set(&sched->num_jobs, 0);
788         atomic64_set(&sched->job_id_count, 0);
789
790         /* Each scheduler will run on a separate kernel thread */
791         sched->thread = kthread_run(drm_sched_main, sched, sched->name);
792         if (IS_ERR(sched->thread)) {
793                 ret = PTR_ERR(sched->thread);
794                 sched->thread = NULL;
795                 DRM_ERROR("Failed to create scheduler for %s.\n", name);
796                 return ret;
797         }
798
799         sched->ready = true;
800         return 0;
801 }
802 EXPORT_SYMBOL(drm_sched_init);
803
804 /**
805  * drm_sched_fini - Destroy a gpu scheduler
806  *
807  * @sched: scheduler instance
808  *
809  * Tears down and cleans up the scheduler.
810  */
811 void drm_sched_fini(struct drm_gpu_scheduler *sched)
812 {
813         if (sched->thread)
814                 kthread_stop(sched->thread);
815
816         sched->ready = false;
817 }
818 EXPORT_SYMBOL(drm_sched_fini);
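
/*
 * Illustrative sketch (not from this file; "my_ctx" and "ring" are
 * hypothetical): drm_sched_fini() is the counterpart of drm_sched_init() and
 * is typically called from the driver's teardown path, after the entities
 * feeding this scheduler have been destroyed.
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 *	drm_sched_fini(&ring->sched);
 */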