Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / gpu / drm / i915 / i915_gem.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drm_vma_manager.h>
29 #include <drm/i915_drm.h>
30 #include <linux/dma-fence-array.h>
31 #include <linux/kthread.h>
32 #include <linux/reservation.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/slab.h>
35 #include <linux/stop_machine.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mman.h>
40
41 #include "display/intel_display.h"
42 #include "display/intel_frontbuffer.h"
43
44 #include "gem/i915_gem_clflush.h"
45 #include "gem/i915_gem_context.h"
46 #include "gem/i915_gem_ioctls.h"
47 #include "gem/i915_gem_pm.h"
48 #include "gem/i915_gemfs.h"
49 #include "gt/intel_gt_pm.h"
50 #include "gt/intel_mocs.h"
51 #include "gt/intel_reset.h"
52 #include "gt/intel_workarounds.h"
53
54 #include "i915_drv.h"
55 #include "i915_scatterlist.h"
56 #include "i915_trace.h"
57 #include "i915_vgpu.h"
58
59 #include "intel_drv.h"
60 #include "intel_pm.h"
61
62 static int
63 insert_mappable_node(struct i915_ggtt *ggtt,
64                      struct drm_mm_node *node, u32 size)
65 {
66         memset(node, 0, sizeof(*node));
67         return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
68                                            size, 0, I915_COLOR_UNEVICTABLE,
69                                            0, ggtt->mappable_end,
70                                            DRM_MM_INSERT_LOW);
71 }
72
73 static void
74 remove_mappable_node(struct drm_mm_node *node)
75 {
76         drm_mm_remove_node(node);
77 }
78
79 int
80 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
81                             struct drm_file *file)
82 {
83         struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
84         struct drm_i915_gem_get_aperture *args = data;
85         struct i915_vma *vma;
86         u64 pinned;
87
88         mutex_lock(&ggtt->vm.mutex);
89
90         pinned = ggtt->vm.reserved;
91         list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
92                 if (i915_vma_is_pinned(vma))
93                         pinned += vma->node.size;
94
95         mutex_unlock(&ggtt->vm.mutex);
96
97         args->aper_size = ggtt->vm.total;
98         args->aper_available_size = args->aper_size - pinned;
99
100         return 0;
101 }
102
103 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
104 {
105         struct i915_vma *vma;
106         LIST_HEAD(still_in_list);
107         int ret = 0;
108
109         lockdep_assert_held(&obj->base.dev->struct_mutex);
110
111         spin_lock(&obj->vma.lock);
112         while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
113                                                        struct i915_vma,
114                                                        obj_link))) {
115                 list_move_tail(&vma->obj_link, &still_in_list);
116                 spin_unlock(&obj->vma.lock);
117
118                 ret = i915_vma_unbind(vma);
119
120                 spin_lock(&obj->vma.lock);
121         }
122         list_splice(&still_in_list, &obj->vma.list);
123         spin_unlock(&obj->vma.lock);
124
125         return ret;
126 }
127
128 static int
129 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
130                      struct drm_i915_gem_pwrite *args,
131                      struct drm_file *file)
132 {
133         void *vaddr = obj->phys_handle->vaddr + args->offset;
134         char __user *user_data = u64_to_user_ptr(args->data_ptr);
135
136         /* We manually control the domain here and pretend that it
137          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
138          */
139         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
140         if (copy_from_user(vaddr, user_data, args->size))
141                 return -EFAULT;
142
143         drm_clflush_virt_range(vaddr, args->size);
144         i915_gem_chipset_flush(to_i915(obj->base.dev));
145
146         intel_fb_obj_flush(obj, ORIGIN_CPU);
147         return 0;
148 }
149
150 static int
151 i915_gem_create(struct drm_file *file,
152                 struct drm_i915_private *dev_priv,
153                 u64 *size_p,
154                 u32 *handle_p)
155 {
156         struct drm_i915_gem_object *obj;
157         u32 handle;
158         u64 size;
159         int ret;
160
161         size = round_up(*size_p, PAGE_SIZE);
162         if (size == 0)
163                 return -EINVAL;
164
165         /* Allocate the new object */
166         obj = i915_gem_object_create_shmem(dev_priv, size);
167         if (IS_ERR(obj))
168                 return PTR_ERR(obj);
169
170         ret = drm_gem_handle_create(file, &obj->base, &handle);
171         /* drop reference from allocate - handle holds it now */
172         i915_gem_object_put(obj);
173         if (ret)
174                 return ret;
175
176         *handle_p = handle;
177         *size_p = size;
178         return 0;
179 }
180
181 int
182 i915_gem_dumb_create(struct drm_file *file,
183                      struct drm_device *dev,
184                      struct drm_mode_create_dumb *args)
185 {
186         int cpp = DIV_ROUND_UP(args->bpp, 8);
187         u32 format;
188
189         switch (cpp) {
190         case 1:
191                 format = DRM_FORMAT_C8;
192                 break;
193         case 2:
194                 format = DRM_FORMAT_RGB565;
195                 break;
196         case 4:
197                 format = DRM_FORMAT_XRGB8888;
198                 break;
199         default:
200                 return -EINVAL;
201         }
202
203         /* have to work out size/pitch and return them */
204         args->pitch = ALIGN(args->width * cpp, 64);
205
206         /* align stride to page size so that we can remap */
207         if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
208                                                     DRM_FORMAT_MOD_LINEAR))
209                 args->pitch = ALIGN(args->pitch, 4096);
210
211         args->size = args->pitch * args->height;
212         return i915_gem_create(file, to_i915(dev),
213                                &args->size, &args->handle);
214 }
215
216 /**
217  * Creates a new mm object and returns a handle to it.
218  * @dev: drm device pointer
219  * @data: ioctl data blob
220  * @file: drm file pointer
221  */
222 int
223 i915_gem_create_ioctl(struct drm_device *dev, void *data,
224                       struct drm_file *file)
225 {
226         struct drm_i915_private *dev_priv = to_i915(dev);
227         struct drm_i915_gem_create *args = data;
228
229         i915_gem_flush_free_objects(dev_priv);
230
231         return i915_gem_create(file, dev_priv,
232                                &args->size, &args->handle);
233 }
234
235 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
236 {
237         intel_wakeref_t wakeref;
238
239         /*
240          * No actual flushing is required for the GTT write domain for reads
241          * from the GTT domain. Writes to it "immediately" go to main memory
242          * as far as we know, so there's no chipset flush. It also doesn't
243          * land in the GPU render cache.
244          *
245          * However, we do have to enforce the order so that all writes through
246          * the GTT land before any writes to the device, such as updates to
247          * the GATT itself.
248          *
249          * We also have to wait a bit for the writes to land from the GTT.
250          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
251          * timing. This issue has only been observed when switching quickly
252          * between GTT writes and CPU reads from inside the kernel on recent hw,
253          * and it appears to only affect discrete GTT blocks (i.e. on LLC
254          * system agents we cannot reproduce this behaviour, until Cannonlake
255          * that was!).
256          */
257
258         wmb();
259
260         if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
261                 return;
262
263         i915_gem_chipset_flush(dev_priv);
264
265         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
266                 struct intel_uncore *uncore = &dev_priv->uncore;
267
268                 spin_lock_irq(&uncore->lock);
269                 intel_uncore_posting_read_fw(uncore,
270                                              RING_HEAD(RENDER_RING_BASE));
271                 spin_unlock_irq(&uncore->lock);
272         }
273 }
274
275 static int
276 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
277             bool needs_clflush)
278 {
279         char *vaddr;
280         int ret;
281
282         vaddr = kmap(page);
283
284         if (needs_clflush)
285                 drm_clflush_virt_range(vaddr + offset, len);
286
287         ret = __copy_to_user(user_data, vaddr + offset, len);
288
289         kunmap(page);
290
291         return ret ? -EFAULT : 0;
292 }
293
294 static int
295 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
296                      struct drm_i915_gem_pread *args)
297 {
298         unsigned int needs_clflush;
299         unsigned int idx, offset;
300         struct dma_fence *fence;
301         char __user *user_data;
302         u64 remain;
303         int ret;
304
305         ret = i915_gem_object_prepare_read(obj, &needs_clflush);
306         if (ret)
307                 return ret;
308
309         fence = i915_gem_object_lock_fence(obj);
310         i915_gem_object_finish_access(obj);
311         if (!fence)
312                 return -ENOMEM;
313
314         remain = args->size;
315         user_data = u64_to_user_ptr(args->data_ptr);
316         offset = offset_in_page(args->offset);
317         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
318                 struct page *page = i915_gem_object_get_page(obj, idx);
319                 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
320
321                 ret = shmem_pread(page, offset, length, user_data,
322                                   needs_clflush);
323                 if (ret)
324                         break;
325
326                 remain -= length;
327                 user_data += length;
328                 offset = 0;
329         }
330
331         i915_gem_object_unlock_fence(obj, fence);
332         return ret;
333 }
334
335 static inline bool
336 gtt_user_read(struct io_mapping *mapping,
337               loff_t base, int offset,
338               char __user *user_data, int length)
339 {
340         void __iomem *vaddr;
341         unsigned long unwritten;
342
343         /* We can use the cpu mem copy function because this is X86. */
344         vaddr = io_mapping_map_atomic_wc(mapping, base);
345         unwritten = __copy_to_user_inatomic(user_data,
346                                             (void __force *)vaddr + offset,
347                                             length);
348         io_mapping_unmap_atomic(vaddr);
349         if (unwritten) {
350                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
351                 unwritten = copy_to_user(user_data,
352                                          (void __force *)vaddr + offset,
353                                          length);
354                 io_mapping_unmap(vaddr);
355         }
356         return unwritten;
357 }
358
359 static int
360 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
361                    const struct drm_i915_gem_pread *args)
362 {
363         struct drm_i915_private *i915 = to_i915(obj->base.dev);
364         struct i915_ggtt *ggtt = &i915->ggtt;
365         intel_wakeref_t wakeref;
366         struct drm_mm_node node;
367         struct dma_fence *fence;
368         void __user *user_data;
369         struct i915_vma *vma;
370         u64 remain, offset;
371         int ret;
372
373         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
374         if (ret)
375                 return ret;
376
377         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
378         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
379                                        PIN_MAPPABLE |
380                                        PIN_NONFAULT |
381                                        PIN_NONBLOCK);
382         if (!IS_ERR(vma)) {
383                 node.start = i915_ggtt_offset(vma);
384                 node.allocated = false;
385                 ret = i915_vma_put_fence(vma);
386                 if (ret) {
387                         i915_vma_unpin(vma);
388                         vma = ERR_PTR(ret);
389                 }
390         }
391         if (IS_ERR(vma)) {
392                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
393                 if (ret)
394                         goto out_unlock;
395                 GEM_BUG_ON(!node.allocated);
396         }
397
398         mutex_unlock(&i915->drm.struct_mutex);
399
400         ret = i915_gem_object_lock_interruptible(obj);
401         if (ret)
402                 goto out_unpin;
403
404         ret = i915_gem_object_set_to_gtt_domain(obj, false);
405         if (ret) {
406                 i915_gem_object_unlock(obj);
407                 goto out_unpin;
408         }
409
410         fence = i915_gem_object_lock_fence(obj);
411         i915_gem_object_unlock(obj);
412         if (!fence) {
413                 ret = -ENOMEM;
414                 goto out_unpin;
415         }
416
417         user_data = u64_to_user_ptr(args->data_ptr);
418         remain = args->size;
419         offset = args->offset;
420
421         while (remain > 0) {
422                 /* Operation in this page
423                  *
424                  * page_base = page offset within aperture
425                  * page_offset = offset within page
426                  * page_length = bytes to copy for this page
427                  */
428                 u32 page_base = node.start;
429                 unsigned page_offset = offset_in_page(offset);
430                 unsigned page_length = PAGE_SIZE - page_offset;
431                 page_length = remain < page_length ? remain : page_length;
432                 if (node.allocated) {
433                         wmb();
434                         ggtt->vm.insert_page(&ggtt->vm,
435                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
436                                              node.start, I915_CACHE_NONE, 0);
437                         wmb();
438                 } else {
439                         page_base += offset & PAGE_MASK;
440                 }
441
442                 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
443                                   user_data, page_length)) {
444                         ret = -EFAULT;
445                         break;
446                 }
447
448                 remain -= page_length;
449                 user_data += page_length;
450                 offset += page_length;
451         }
452
453         i915_gem_object_unlock_fence(obj, fence);
454 out_unpin:
455         mutex_lock(&i915->drm.struct_mutex);
456         if (node.allocated) {
457                 wmb();
458                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
459                 remove_mappable_node(&node);
460         } else {
461                 i915_vma_unpin(vma);
462         }
463 out_unlock:
464         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
465         mutex_unlock(&i915->drm.struct_mutex);
466
467         return ret;
468 }
469
470 /**
471  * Reads data from the object referenced by handle.
472  * @dev: drm device pointer
473  * @data: ioctl data blob
474  * @file: drm file pointer
475  *
476  * On error, the contents of *data are undefined.
477  */
478 int
479 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
480                      struct drm_file *file)
481 {
482         struct drm_i915_gem_pread *args = data;
483         struct drm_i915_gem_object *obj;
484         int ret;
485
486         if (args->size == 0)
487                 return 0;
488
489         if (!access_ok(u64_to_user_ptr(args->data_ptr),
490                        args->size))
491                 return -EFAULT;
492
493         obj = i915_gem_object_lookup(file, args->handle);
494         if (!obj)
495                 return -ENOENT;
496
497         /* Bounds check source.  */
498         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
499                 ret = -EINVAL;
500                 goto out;
501         }
502
503         trace_i915_gem_object_pread(obj, args->offset, args->size);
504
505         ret = i915_gem_object_wait(obj,
506                                    I915_WAIT_INTERRUPTIBLE,
507                                    MAX_SCHEDULE_TIMEOUT);
508         if (ret)
509                 goto out;
510
511         ret = i915_gem_object_pin_pages(obj);
512         if (ret)
513                 goto out;
514
515         ret = i915_gem_shmem_pread(obj, args);
516         if (ret == -EFAULT || ret == -ENODEV)
517                 ret = i915_gem_gtt_pread(obj, args);
518
519         i915_gem_object_unpin_pages(obj);
520 out:
521         i915_gem_object_put(obj);
522         return ret;
523 }
524
525 /* This is the fast write path which cannot handle
526  * page faults in the source data
527  */
528
529 static inline bool
530 ggtt_write(struct io_mapping *mapping,
531            loff_t base, int offset,
532            char __user *user_data, int length)
533 {
534         void __iomem *vaddr;
535         unsigned long unwritten;
536
537         /* We can use the cpu mem copy function because this is X86. */
538         vaddr = io_mapping_map_atomic_wc(mapping, base);
539         unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
540                                                       user_data, length);
541         io_mapping_unmap_atomic(vaddr);
542         if (unwritten) {
543                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
544                 unwritten = copy_from_user((void __force *)vaddr + offset,
545                                            user_data, length);
546                 io_mapping_unmap(vaddr);
547         }
548
549         return unwritten;
550 }
551
552 /**
553  * This is the fast pwrite path, where we copy the data directly from the
554  * user into the GTT, uncached.
555  * @obj: i915 GEM object
556  * @args: pwrite arguments structure
557  */
558 static int
559 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
560                          const struct drm_i915_gem_pwrite *args)
561 {
562         struct drm_i915_private *i915 = to_i915(obj->base.dev);
563         struct i915_ggtt *ggtt = &i915->ggtt;
564         struct intel_runtime_pm *rpm = &i915->runtime_pm;
565         intel_wakeref_t wakeref;
566         struct drm_mm_node node;
567         struct dma_fence *fence;
568         struct i915_vma *vma;
569         u64 remain, offset;
570         void __user *user_data;
571         int ret;
572
573         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
574         if (ret)
575                 return ret;
576
577         if (i915_gem_object_has_struct_page(obj)) {
578                 /*
579                  * Avoid waking the device up if we can fallback, as
580                  * waking/resuming is very slow (worst-case 10-100 ms
581                  * depending on PCI sleeps and our own resume time).
582                  * This easily dwarfs any performance advantage from
583                  * using the cache bypass of indirect GGTT access.
584                  */
585                 wakeref = intel_runtime_pm_get_if_in_use(rpm);
586                 if (!wakeref) {
587                         ret = -EFAULT;
588                         goto out_unlock;
589                 }
590         } else {
591                 /* No backing pages, no fallback, we must force GGTT access */
592                 wakeref = intel_runtime_pm_get(rpm);
593         }
594
595         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
596                                        PIN_MAPPABLE |
597                                        PIN_NONFAULT |
598                                        PIN_NONBLOCK);
599         if (!IS_ERR(vma)) {
600                 node.start = i915_ggtt_offset(vma);
601                 node.allocated = false;
602                 ret = i915_vma_put_fence(vma);
603                 if (ret) {
604                         i915_vma_unpin(vma);
605                         vma = ERR_PTR(ret);
606                 }
607         }
608         if (IS_ERR(vma)) {
609                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
610                 if (ret)
611                         goto out_rpm;
612                 GEM_BUG_ON(!node.allocated);
613         }
614
615         mutex_unlock(&i915->drm.struct_mutex);
616
617         ret = i915_gem_object_lock_interruptible(obj);
618         if (ret)
619                 goto out_unpin;
620
621         ret = i915_gem_object_set_to_gtt_domain(obj, true);
622         if (ret) {
623                 i915_gem_object_unlock(obj);
624                 goto out_unpin;
625         }
626
627         fence = i915_gem_object_lock_fence(obj);
628         i915_gem_object_unlock(obj);
629         if (!fence) {
630                 ret = -ENOMEM;
631                 goto out_unpin;
632         }
633
634         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
635
636         user_data = u64_to_user_ptr(args->data_ptr);
637         offset = args->offset;
638         remain = args->size;
639         while (remain) {
640                 /* Operation in this page
641                  *
642                  * page_base = page offset within aperture
643                  * page_offset = offset within page
644                  * page_length = bytes to copy for this page
645                  */
646                 u32 page_base = node.start;
647                 unsigned int page_offset = offset_in_page(offset);
648                 unsigned int page_length = PAGE_SIZE - page_offset;
649                 page_length = remain < page_length ? remain : page_length;
650                 if (node.allocated) {
651                         wmb(); /* flush the write before we modify the GGTT */
652                         ggtt->vm.insert_page(&ggtt->vm,
653                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
654                                              node.start, I915_CACHE_NONE, 0);
655                         wmb(); /* flush modifications to the GGTT (insert_page) */
656                 } else {
657                         page_base += offset & PAGE_MASK;
658                 }
659                 /* If we get a fault while copying data, then (presumably) our
660                  * source page isn't available.  Return the error and we'll
661                  * retry in the slow path.
662                  * If the object is non-shmem backed, we retry again with the
663                  * path that handles page fault.
664                  */
665                 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
666                                user_data, page_length)) {
667                         ret = -EFAULT;
668                         break;
669                 }
670
671                 remain -= page_length;
672                 user_data += page_length;
673                 offset += page_length;
674         }
675         intel_fb_obj_flush(obj, ORIGIN_CPU);
676
677         i915_gem_object_unlock_fence(obj, fence);
678 out_unpin:
679         mutex_lock(&i915->drm.struct_mutex);
680         if (node.allocated) {
681                 wmb();
682                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
683                 remove_mappable_node(&node);
684         } else {
685                 i915_vma_unpin(vma);
686         }
687 out_rpm:
688         intel_runtime_pm_put(rpm, wakeref);
689 out_unlock:
690         mutex_unlock(&i915->drm.struct_mutex);
691         return ret;
692 }
693
694 /* Per-page copy function for the shmem pwrite fastpath.
695  * Flushes invalid cachelines before writing to the target if
696  * needs_clflush_before is set and flushes out any written cachelines after
697  * writing if needs_clflush is set.
698  */
699 static int
700 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
701              bool needs_clflush_before,
702              bool needs_clflush_after)
703 {
704         char *vaddr;
705         int ret;
706
707         vaddr = kmap(page);
708
709         if (needs_clflush_before)
710                 drm_clflush_virt_range(vaddr + offset, len);
711
712         ret = __copy_from_user(vaddr + offset, user_data, len);
713         if (!ret && needs_clflush_after)
714                 drm_clflush_virt_range(vaddr + offset, len);
715
716         kunmap(page);
717
718         return ret ? -EFAULT : 0;
719 }
720
721 static int
722 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
723                       const struct drm_i915_gem_pwrite *args)
724 {
725         unsigned int partial_cacheline_write;
726         unsigned int needs_clflush;
727         unsigned int offset, idx;
728         struct dma_fence *fence;
729         void __user *user_data;
730         u64 remain;
731         int ret;
732
733         ret = i915_gem_object_prepare_write(obj, &needs_clflush);
734         if (ret)
735                 return ret;
736
737         fence = i915_gem_object_lock_fence(obj);
738         i915_gem_object_finish_access(obj);
739         if (!fence)
740                 return -ENOMEM;
741
742         /* If we don't overwrite a cacheline completely we need to be
743          * careful to have up-to-date data by first clflushing. Don't
744          * overcomplicate things and flush the entire patch.
745          */
746         partial_cacheline_write = 0;
747         if (needs_clflush & CLFLUSH_BEFORE)
748                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
749
750         user_data = u64_to_user_ptr(args->data_ptr);
751         remain = args->size;
752         offset = offset_in_page(args->offset);
753         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
754                 struct page *page = i915_gem_object_get_page(obj, idx);
755                 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
756
757                 ret = shmem_pwrite(page, offset, length, user_data,
758                                    (offset | length) & partial_cacheline_write,
759                                    needs_clflush & CLFLUSH_AFTER);
760                 if (ret)
761                         break;
762
763                 remain -= length;
764                 user_data += length;
765                 offset = 0;
766         }
767
768         intel_fb_obj_flush(obj, ORIGIN_CPU);
769         i915_gem_object_unlock_fence(obj, fence);
770
771         return ret;
772 }
773
774 /**
775  * Writes data to the object referenced by handle.
776  * @dev: drm device
777  * @data: ioctl data blob
778  * @file: drm file
779  *
780  * On error, the contents of the buffer that were to be modified are undefined.
781  */
782 int
783 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
784                       struct drm_file *file)
785 {
786         struct drm_i915_gem_pwrite *args = data;
787         struct drm_i915_gem_object *obj;
788         int ret;
789
790         if (args->size == 0)
791                 return 0;
792
793         if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
794                 return -EFAULT;
795
796         obj = i915_gem_object_lookup(file, args->handle);
797         if (!obj)
798                 return -ENOENT;
799
800         /* Bounds check destination. */
801         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
802                 ret = -EINVAL;
803                 goto err;
804         }
805
806         /* Writes not allowed into this read-only object */
807         if (i915_gem_object_is_readonly(obj)) {
808                 ret = -EINVAL;
809                 goto err;
810         }
811
812         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
813
814         ret = -ENODEV;
815         if (obj->ops->pwrite)
816                 ret = obj->ops->pwrite(obj, args);
817         if (ret != -ENODEV)
818                 goto err;
819
820         ret = i915_gem_object_wait(obj,
821                                    I915_WAIT_INTERRUPTIBLE |
822                                    I915_WAIT_ALL,
823                                    MAX_SCHEDULE_TIMEOUT);
824         if (ret)
825                 goto err;
826
827         ret = i915_gem_object_pin_pages(obj);
828         if (ret)
829                 goto err;
830
831         ret = -EFAULT;
832         /* We can only do the GTT pwrite on untiled buffers, as otherwise
833          * it would end up going through the fenced access, and we'll get
834          * different detiling behavior between reading and writing.
835          * pread/pwrite currently are reading and writing from the CPU
836          * perspective, requiring manual detiling by the client.
837          */
838         if (!i915_gem_object_has_struct_page(obj) ||
839             cpu_write_needs_clflush(obj))
840                 /* Note that the gtt paths might fail with non-page-backed user
841                  * pointers (e.g. gtt mappings when moving data between
842                  * textures). Fallback to the shmem path in that case.
843                  */
844                 ret = i915_gem_gtt_pwrite_fast(obj, args);
845
846         if (ret == -EFAULT || ret == -ENOSPC) {
847                 if (obj->phys_handle)
848                         ret = i915_gem_phys_pwrite(obj, args, file);
849                 else
850                         ret = i915_gem_shmem_pwrite(obj, args);
851         }
852
853         i915_gem_object_unpin_pages(obj);
854 err:
855         i915_gem_object_put(obj);
856         return ret;
857 }
858
859 /**
860  * Called when user space has done writes to this buffer
861  * @dev: drm device
862  * @data: ioctl data blob
863  * @file: drm file
864  */
865 int
866 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
867                          struct drm_file *file)
868 {
869         struct drm_i915_gem_sw_finish *args = data;
870         struct drm_i915_gem_object *obj;
871
872         obj = i915_gem_object_lookup(file, args->handle);
873         if (!obj)
874                 return -ENOENT;
875
876         /*
877          * Proxy objects are barred from CPU access, so there is no
878          * need to ban sw_finish as it is a nop.
879          */
880
881         /* Pinned buffers may be scanout, so flush the cache */
882         i915_gem_object_flush_if_display(obj);
883         i915_gem_object_put(obj);
884
885         return 0;
886 }
887
888 void i915_gem_runtime_suspend(struct drm_i915_private *i915)
889 {
890         struct drm_i915_gem_object *obj, *on;
891         int i;
892
893         /*
894          * Only called during RPM suspend. All users of the userfault_list
895          * must be holding an RPM wakeref to ensure that this can not
896          * run concurrently with themselves (and use the struct_mutex for
897          * protection between themselves).
898          */
899
900         list_for_each_entry_safe(obj, on,
901                                  &i915->ggtt.userfault_list, userfault_link)
902                 __i915_gem_object_release_mmap(obj);
903
904         /*
905          * The fence will be lost when the device powers down. If any were
906          * in use by hardware (i.e. they are pinned), we should not be powering
907          * down! All other fences will be reacquired by the user upon waking.
908          */
909         for (i = 0; i < i915->ggtt.num_fences; i++) {
910                 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
911
912                 /*
913                  * Ideally we want to assert that the fence register is not
914                  * live at this point (i.e. that no piece of code will be
915                  * trying to write through fence + GTT, as that both violates
916                  * our tracking of activity and associated locking/barriers,
917                  * but also is illegal given that the hw is powered down).
918                  *
919                  * Previously we used reg->pin_count as a "liveness" indicator.
920                  * That is not sufficient, and we need a more fine-grained
921                  * tool if we want to have a sanity check here.
922                  */
923
924                 if (!reg->vma)
925                         continue;
926
927                 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
928                 reg->dirty = true;
929         }
930 }
931
932 static int wait_for_engines(struct drm_i915_private *i915)
933 {
934         if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
935                 dev_err(i915->drm.dev,
936                         "Failed to idle engines, declaring wedged!\n");
937                 GEM_TRACE_DUMP();
938                 i915_gem_set_wedged(i915);
939                 return -EIO;
940         }
941
942         return 0;
943 }
944
945 static long
946 wait_for_timelines(struct drm_i915_private *i915,
947                    unsigned int flags, long timeout)
948 {
949         struct i915_gt_timelines *gt = &i915->gt.timelines;
950         struct i915_timeline *tl;
951
952         mutex_lock(&gt->mutex);
953         list_for_each_entry(tl, &gt->active_list, link) {
954                 struct i915_request *rq;
955
956                 rq = i915_active_request_get_unlocked(&tl->last_request);
957                 if (!rq)
958                         continue;
959
960                 mutex_unlock(&gt->mutex);
961
962                 /*
963                  * "Race-to-idle".
964                  *
965                  * Switching to the kernel context is often used a synchronous
966                  * step prior to idling, e.g. in suspend for flushing all
967                  * current operations to memory before sleeping. These we
968                  * want to complete as quickly as possible to avoid prolonged
969                  * stalls, so allow the gpu to boost to maximum clocks.
970                  */
971                 if (flags & I915_WAIT_FOR_IDLE_BOOST)
972                         gen6_rps_boost(rq);
973
974                 timeout = i915_request_wait(rq, flags, timeout);
975                 i915_request_put(rq);
976                 if (timeout < 0)
977                         return timeout;
978
979                 /* restart after reacquiring the lock */
980                 mutex_lock(&gt->mutex);
981                 tl = list_entry(&gt->active_list, typeof(*tl), link);
982         }
983         mutex_unlock(&gt->mutex);
984
985         return timeout;
986 }
987
988 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
989                            unsigned int flags, long timeout)
990 {
991         GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
992                   flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
993                   timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
994                   yesno(i915->gt.awake));
995
996         /* If the device is asleep, we have no requests outstanding */
997         if (!READ_ONCE(i915->gt.awake))
998                 return 0;
999
1000         timeout = wait_for_timelines(i915, flags, timeout);
1001         if (timeout < 0)
1002                 return timeout;
1003
1004         if (flags & I915_WAIT_LOCKED) {
1005                 int err;
1006
1007                 lockdep_assert_held(&i915->drm.struct_mutex);
1008
1009                 err = wait_for_engines(i915);
1010                 if (err)
1011                         return err;
1012
1013                 i915_retire_requests(i915);
1014         }
1015
1016         return 0;
1017 }
1018
1019 struct i915_vma *
1020 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1021                          const struct i915_ggtt_view *view,
1022                          u64 size,
1023                          u64 alignment,
1024                          u64 flags)
1025 {
1026         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1027         struct i915_address_space *vm = &dev_priv->ggtt.vm;
1028
1029         return i915_gem_object_pin(obj, vm, view, size, alignment,
1030                                    flags | PIN_GLOBAL);
1031 }
1032
1033 struct i915_vma *
1034 i915_gem_object_pin(struct drm_i915_gem_object *obj,
1035                     struct i915_address_space *vm,
1036                     const struct i915_ggtt_view *view,
1037                     u64 size,
1038                     u64 alignment,
1039                     u64 flags)
1040 {
1041         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1042         struct i915_vma *vma;
1043         int ret;
1044
1045         lockdep_assert_held(&obj->base.dev->struct_mutex);
1046
1047         if (i915_gem_object_never_bind_ggtt(obj))
1048                 return ERR_PTR(-ENODEV);
1049
1050         if (flags & PIN_MAPPABLE &&
1051             (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
1052                 /* If the required space is larger than the available
1053                  * aperture, we will not able to find a slot for the
1054                  * object and unbinding the object now will be in
1055                  * vain. Worse, doing so may cause us to ping-pong
1056                  * the object in and out of the Global GTT and
1057                  * waste a lot of cycles under the mutex.
1058                  */
1059                 if (obj->base.size > dev_priv->ggtt.mappable_end)
1060                         return ERR_PTR(-E2BIG);
1061
1062                 /* If NONBLOCK is set the caller is optimistically
1063                  * trying to cache the full object within the mappable
1064                  * aperture, and *must* have a fallback in place for
1065                  * situations where we cannot bind the object. We
1066                  * can be a little more lax here and use the fallback
1067                  * more often to avoid costly migrations of ourselves
1068                  * and other objects within the aperture.
1069                  *
1070                  * Half-the-aperture is used as a simple heuristic.
1071                  * More interesting would to do search for a free
1072                  * block prior to making the commitment to unbind.
1073                  * That caters for the self-harm case, and with a
1074                  * little more heuristics (e.g. NOFAULT, NOEVICT)
1075                  * we could try to minimise harm to others.
1076                  */
1077                 if (flags & PIN_NONBLOCK &&
1078                     obj->base.size > dev_priv->ggtt.mappable_end / 2)
1079                         return ERR_PTR(-ENOSPC);
1080         }
1081
1082         vma = i915_vma_instance(obj, vm, view);
1083         if (IS_ERR(vma))
1084                 return vma;
1085
1086         if (i915_vma_misplaced(vma, size, alignment, flags)) {
1087                 if (flags & PIN_NONBLOCK) {
1088                         if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1089                                 return ERR_PTR(-ENOSPC);
1090
1091                         if (flags & PIN_MAPPABLE &&
1092                             vma->fence_size > dev_priv->ggtt.mappable_end / 2)
1093                                 return ERR_PTR(-ENOSPC);
1094                 }
1095
1096                 WARN(i915_vma_is_pinned(vma),
1097                      "bo is already pinned in ggtt with incorrect alignment:"
1098                      " offset=%08x, req.alignment=%llx,"
1099                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
1100                      i915_ggtt_offset(vma), alignment,
1101                      !!(flags & PIN_MAPPABLE),
1102                      i915_vma_is_map_and_fenceable(vma));
1103                 ret = i915_vma_unbind(vma);
1104                 if (ret)
1105                         return ERR_PTR(ret);
1106         }
1107
1108         ret = i915_vma_pin(vma, size, alignment, flags);
1109         if (ret)
1110                 return ERR_PTR(ret);
1111
1112         return vma;
1113 }
1114
1115 int
1116 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1117                        struct drm_file *file_priv)
1118 {
1119         struct drm_i915_private *i915 = to_i915(dev);
1120         struct drm_i915_gem_madvise *args = data;
1121         struct drm_i915_gem_object *obj;
1122         int err;
1123
1124         switch (args->madv) {
1125         case I915_MADV_DONTNEED:
1126         case I915_MADV_WILLNEED:
1127             break;
1128         default:
1129             return -EINVAL;
1130         }
1131
1132         obj = i915_gem_object_lookup(file_priv, args->handle);
1133         if (!obj)
1134                 return -ENOENT;
1135
1136         err = mutex_lock_interruptible(&obj->mm.lock);
1137         if (err)
1138                 goto out;
1139
1140         if (i915_gem_object_has_pages(obj) &&
1141             i915_gem_object_is_tiled(obj) &&
1142             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
1143                 if (obj->mm.madv == I915_MADV_WILLNEED) {
1144                         GEM_BUG_ON(!obj->mm.quirked);
1145                         __i915_gem_object_unpin_pages(obj);
1146                         obj->mm.quirked = false;
1147                 }
1148                 if (args->madv == I915_MADV_WILLNEED) {
1149                         GEM_BUG_ON(obj->mm.quirked);
1150                         __i915_gem_object_pin_pages(obj);
1151                         obj->mm.quirked = true;
1152                 }
1153         }
1154
1155         if (obj->mm.madv != __I915_MADV_PURGED)
1156                 obj->mm.madv = args->madv;
1157
1158         if (i915_gem_object_has_pages(obj)) {
1159                 struct list_head *list;
1160
1161                 if (i915_gem_object_is_shrinkable(obj)) {
1162                         unsigned long flags;
1163
1164                         spin_lock_irqsave(&i915->mm.obj_lock, flags);
1165
1166                         if (obj->mm.madv != I915_MADV_WILLNEED)
1167                                 list = &i915->mm.purge_list;
1168                         else
1169                                 list = &i915->mm.shrink_list;
1170                         list_move_tail(&obj->mm.link, list);
1171
1172                         spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1173                 }
1174         }
1175
1176         /* if the object is no longer attached, discard its backing storage */
1177         if (obj->mm.madv == I915_MADV_DONTNEED &&
1178             !i915_gem_object_has_pages(obj))
1179                 i915_gem_object_truncate(obj);
1180
1181         args->retained = obj->mm.madv != __I915_MADV_PURGED;
1182         mutex_unlock(&obj->mm.lock);
1183
1184 out:
1185         i915_gem_object_put(obj);
1186         return err;
1187 }
1188
1189 void i915_gem_sanitize(struct drm_i915_private *i915)
1190 {
1191         intel_wakeref_t wakeref;
1192
1193         GEM_TRACE("\n");
1194
1195         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1196         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
1197
1198         /*
1199          * As we have just resumed the machine and woken the device up from
1200          * deep PCI sleep (presumably D3_cold), assume the HW has been reset
1201          * back to defaults, recovering from whatever wedged state we left it
1202          * in and so worth trying to use the device once more.
1203          */
1204         if (i915_terminally_wedged(i915))
1205                 i915_gem_unset_wedged(i915);
1206
1207         /*
1208          * If we inherit context state from the BIOS or earlier occupants
1209          * of the GPU, the GPU may be in an inconsistent state when we
1210          * try to take over. The only way to remove the earlier state
1211          * is by resetting. However, resetting on earlier gen is tricky as
1212          * it may impact the display and we are uncertain about the stability
1213          * of the reset, so this could be applied to even earlier gen.
1214          */
1215         intel_gt_sanitize(i915, false);
1216
1217         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
1218         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1219 }
1220
1221 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
1222 {
1223         if (INTEL_GEN(dev_priv) < 5 ||
1224             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
1225                 return;
1226
1227         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
1228                                  DISP_TILE_SURFACE_SWIZZLING);
1229
1230         if (IS_GEN(dev_priv, 5))
1231                 return;
1232
1233         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
1234         if (IS_GEN(dev_priv, 6))
1235                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
1236         else if (IS_GEN(dev_priv, 7))
1237                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
1238         else if (IS_GEN(dev_priv, 8))
1239                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
1240         else
1241                 BUG();
1242 }
1243
1244 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
1245 {
1246         I915_WRITE(RING_CTL(base), 0);
1247         I915_WRITE(RING_HEAD(base), 0);
1248         I915_WRITE(RING_TAIL(base), 0);
1249         I915_WRITE(RING_START(base), 0);
1250 }
1251
1252 static void init_unused_rings(struct drm_i915_private *dev_priv)
1253 {
1254         if (IS_I830(dev_priv)) {
1255                 init_unused_ring(dev_priv, PRB1_BASE);
1256                 init_unused_ring(dev_priv, SRB0_BASE);
1257                 init_unused_ring(dev_priv, SRB1_BASE);
1258                 init_unused_ring(dev_priv, SRB2_BASE);
1259                 init_unused_ring(dev_priv, SRB3_BASE);
1260         } else if (IS_GEN(dev_priv, 2)) {
1261                 init_unused_ring(dev_priv, SRB0_BASE);
1262                 init_unused_ring(dev_priv, SRB1_BASE);
1263         } else if (IS_GEN(dev_priv, 3)) {
1264                 init_unused_ring(dev_priv, PRB1_BASE);
1265                 init_unused_ring(dev_priv, PRB2_BASE);
1266         }
1267 }
1268
1269 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
1270 {
1271         int ret;
1272
1273         dev_priv->gt.last_init_time = ktime_get();
1274
1275         /* Double layer security blanket, see i915_gem_init() */
1276         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1277
1278         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
1279                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
1280
1281         if (IS_HASWELL(dev_priv))
1282                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
1283                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
1284
1285         /* Apply the GT workarounds... */
1286         intel_gt_apply_workarounds(dev_priv);
1287         /* ...and determine whether they are sticking. */
1288         intel_gt_verify_workarounds(dev_priv, "init");
1289
1290         i915_gem_init_swizzling(dev_priv);
1291
1292         /*
1293          * At least 830 can leave some of the unused rings
1294          * "active" (ie. head != tail) after resume which
1295          * will prevent c3 entry. Makes sure all unused rings
1296          * are totally idle.
1297          */
1298         init_unused_rings(dev_priv);
1299
1300         BUG_ON(!dev_priv->kernel_context);
1301         ret = i915_terminally_wedged(dev_priv);
1302         if (ret)
1303                 goto out;
1304
1305         ret = i915_ppgtt_init_hw(dev_priv);
1306         if (ret) {
1307                 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
1308                 goto out;
1309         }
1310
1311         ret = intel_wopcm_init_hw(&dev_priv->wopcm);
1312         if (ret) {
1313                 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
1314                 goto out;
1315         }
1316
1317         /* We can't enable contexts until all firmware is loaded */
1318         ret = intel_uc_init_hw(dev_priv);
1319         if (ret) {
1320                 DRM_ERROR("Enabling uc failed (%d)\n", ret);
1321                 goto out;
1322         }
1323
1324         intel_mocs_init_l3cc_table(dev_priv);
1325
1326         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1327
1328         intel_engines_set_scheduler_caps(dev_priv);
1329         return 0;
1330
1331 out:
1332         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1333         return ret;
1334 }
1335
1336 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
1337 {
1338         struct intel_engine_cs *engine;
1339         struct i915_gem_context *ctx;
1340         struct i915_gem_engines *e;
1341         enum intel_engine_id id;
1342         int err = 0;
1343
1344         /*
1345          * As we reset the gpu during very early sanitisation, the current
1346          * register state on the GPU should reflect its defaults values.
1347          * We load a context onto the hw (with restore-inhibit), then switch
1348          * over to a second context to save that default register state. We
1349          * can then prime every new context with that state so they all start
1350          * from the same default HW values.
1351          */
1352
1353         ctx = i915_gem_context_create_kernel(i915, 0);
1354         if (IS_ERR(ctx))
1355                 return PTR_ERR(ctx);
1356
1357         e = i915_gem_context_lock_engines(ctx);
1358
1359         for_each_engine(engine, i915, id) {
1360                 struct intel_context *ce = e->engines[id];
1361                 struct i915_request *rq;
1362
1363                 rq = intel_context_create_request(ce);
1364                 if (IS_ERR(rq)) {
1365                         err = PTR_ERR(rq);
1366                         goto err_active;
1367                 }
1368
1369                 err = 0;
1370                 if (rq->engine->init_context)
1371                         err = rq->engine->init_context(rq);
1372
1373                 i915_request_add(rq);
1374                 if (err)
1375                         goto err_active;
1376         }
1377
1378         /* Flush the default context image to memory, and enable powersaving. */
1379         if (!i915_gem_load_power_context(i915)) {
1380                 err = -EIO;
1381                 goto err_active;
1382         }
1383
1384         for_each_engine(engine, i915, id) {
1385                 struct intel_context *ce = e->engines[id];
1386                 struct i915_vma *state = ce->state;
1387                 void *vaddr;
1388
1389                 if (!state)
1390                         continue;
1391
1392                 GEM_BUG_ON(intel_context_is_pinned(ce));
1393
1394                 /*
1395                  * As we will hold a reference to the logical state, it will
1396                  * not be torn down with the context, and importantly the
1397                  * object will hold onto its vma (making it possible for a
1398                  * stray GTT write to corrupt our defaults). Unmap the vma
1399                  * from the GTT to prevent such accidents and reclaim the
1400                  * space.
1401                  */
1402                 err = i915_vma_unbind(state);
1403                 if (err)
1404                         goto err_active;
1405
1406                 i915_gem_object_lock(state->obj);
1407                 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
1408                 i915_gem_object_unlock(state->obj);
1409                 if (err)
1410                         goto err_active;
1411
1412                 engine->default_state = i915_gem_object_get(state->obj);
1413                 i915_gem_object_set_cache_coherency(engine->default_state,
1414                                                     I915_CACHE_LLC);
1415
1416                 /* Check we can acquire the image of the context state */
1417                 vaddr = i915_gem_object_pin_map(engine->default_state,
1418                                                 I915_MAP_FORCE_WB);
1419                 if (IS_ERR(vaddr)) {
1420                         err = PTR_ERR(vaddr);
1421                         goto err_active;
1422                 }
1423
1424                 i915_gem_object_unpin_map(engine->default_state);
1425         }
1426
1427         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1428                 unsigned int found = intel_engines_has_context_isolation(i915);
1429
1430                 /*
1431                  * Make sure that classes with multiple engine instances all
1432                  * share the same basic configuration.
1433                  */
1434                 for_each_engine(engine, i915, id) {
1435                         unsigned int bit = BIT(engine->uabi_class);
1436                         unsigned int expected = engine->default_state ? bit : 0;
1437
1438                         if ((found & bit) != expected) {
1439                                 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
1440                                           engine->uabi_class, engine->name);
1441                         }
1442                 }
1443         }
1444
1445 out_ctx:
1446         i915_gem_context_unlock_engines(ctx);
1447         i915_gem_context_set_closed(ctx);
1448         i915_gem_context_put(ctx);
1449         return err;
1450
1451 err_active:
1452         /*
1453          * If we have to abandon now, we expect the engines to be idle
1454          * and ready to be torn-down. The quickest way we can accomplish
1455          * this is by declaring ourselves wedged.
1456          */
1457         i915_gem_set_wedged(i915);
1458         goto out_ctx;
1459 }
1460
1461 static int
1462 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
1463 {
1464         struct drm_i915_gem_object *obj;
1465         struct i915_vma *vma;
1466         int ret;
1467
1468         obj = i915_gem_object_create_stolen(i915, size);
1469         if (!obj)
1470                 obj = i915_gem_object_create_internal(i915, size);
1471         if (IS_ERR(obj)) {
1472                 DRM_ERROR("Failed to allocate scratch page\n");
1473                 return PTR_ERR(obj);
1474         }
1475
1476         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1477         if (IS_ERR(vma)) {
1478                 ret = PTR_ERR(vma);
1479                 goto err_unref;
1480         }
1481
1482         ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1483         if (ret)
1484                 goto err_unref;
1485
1486         i915->gt.scratch = vma;
1487         return 0;
1488
1489 err_unref:
1490         i915_gem_object_put(obj);
1491         return ret;
1492 }
1493
1494 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
1495 {
1496         i915_vma_unpin_and_release(&i915->gt.scratch, 0);
1497 }
1498
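/*
 * Debug-build-only check (CONFIG_DRM_I915_DEBUG_GEM): verify that each
 * engine's "load" workaround list is still in effect after bring-up;
 * any mismatch fails the init with -EIO.
 */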
1499 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
1500 {
1501         struct intel_engine_cs *engine;
1502         enum intel_engine_id id;
1503         int err = 0;
1504
1505         if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1506                 return 0;
1507
1508         for_each_engine(engine, i915, id) {
1509                 if (intel_engine_verify_workarounds(engine, "load"))
1510                         err = -EIO;
1511         }
1512
1513         return err;
1514 }
1515
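/*
 * Main GEM bring-up during driver load. The ordering below matters:
 * GGTT and scratch first, then engine/context software state, then
 * power management and uC firmware, and only then the actual HW init,
 * request replay and recording of the default context image.
 * struct_mutex and a forcewake reference are held across the whole
 * sequence.
 */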
1516 int i915_gem_init(struct drm_i915_private *dev_priv)
1517 {
1518         int ret;
1519
1520         /* We need to fall back to 4K pages if the host doesn't support a huge GTT. */
1521         if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1522                 mkwrite_device_info(dev_priv)->page_sizes =
1523                         I915_GTT_PAGE_SIZE_4K;
1524
1525         dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
1526
1527         i915_timelines_init(dev_priv);
1528
1529         ret = i915_gem_init_userptr(dev_priv);
1530         if (ret)
1531                 return ret;
1532
1533         ret = intel_uc_init_misc(dev_priv);
1534         if (ret)
1535                 return ret;
1536
1537         ret = intel_wopcm_init(&dev_priv->wopcm);
1538         if (ret)
1539                 goto err_uc_misc;
1540
1541         /* This is just a security blanket to placate dragons.
1542          * On some systems, we very sporadically observe that the first TLBs
1543          * used by the CS may be stale, despite us poking the TLB reset. If
1544          * we hold the forcewake during initialisation these problems
1545          * just magically go away.
1546          */
1547         mutex_lock(&dev_priv->drm.struct_mutex);
1548         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1549
1550         ret = i915_gem_init_ggtt(dev_priv);
1551         if (ret) {
1552                 GEM_BUG_ON(ret == -EIO);
1553                 goto err_unlock;
1554         }
1555
1556         ret = i915_gem_init_scratch(dev_priv,
1557                                     IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
1558         if (ret) {
1559                 GEM_BUG_ON(ret == -EIO);
1560                 goto err_ggtt;
1561         }
1562
1563         ret = intel_engines_setup(dev_priv);
1564         if (ret) {
1565                 GEM_BUG_ON(ret == -EIO);
1566                 goto err_scratch;
1567         }
1568
1569         ret = i915_gem_contexts_init(dev_priv);
1570         if (ret) {
1571                 GEM_BUG_ON(ret == -EIO);
1572                 goto err_scratch;
1573         }
1574
1575         ret = intel_engines_init(dev_priv);
1576         if (ret) {
1577                 GEM_BUG_ON(ret == -EIO);
1578                 goto err_context;
1579         }
1580
1581         intel_init_gt_powersave(dev_priv);
1582
1583         ret = intel_uc_init(dev_priv);
1584         if (ret)
1585                 goto err_pm;
1586
1587         ret = i915_gem_init_hw(dev_priv);
1588         if (ret)
1589                 goto err_uc_init;
1590
1591         /* Only when the HW is re-initialised can we replay the requests */
1592         ret = intel_gt_resume(dev_priv);
1593         if (ret)
1594                 goto err_init_hw;
1595
1596         /*
1597          * Despite its name, intel_init_clock_gating applies not only display
1598          * clock gating workarounds but also GT mmio workarounds and the
1599          * occasional GT power context workaround. Worse, sometimes it includes
1600          * a context register workaround which we need to apply before we
1601          * record the default HW state for all contexts.
1602          *
1603          * FIXME: break up the workarounds and apply them at the right time!
1604          */
1605         intel_init_clock_gating(dev_priv);
1606
1607         ret = intel_engines_verify_workarounds(dev_priv);
1608         if (ret)
1609                 goto err_gt;
1610
1611         ret = __intel_engines_record_defaults(dev_priv);
1612         if (ret)
1613                 goto err_gt;
1614
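        /*
         * Two separate fault-injection points, presumably so that both the
         * ordinary unwind (-ENODEV) and the "wedge but keep KMS alive"
         * unwind (-EIO) below get exercised by load-failure testing.
         */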
1615         if (i915_inject_load_failure()) {
1616                 ret = -ENODEV;
1617                 goto err_gt;
1618         }
1619
1620         if (i915_inject_load_failure()) {
1621                 ret = -EIO;
1622                 goto err_gt;
1623         }
1624
1625         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1626         mutex_unlock(&dev_priv->drm.struct_mutex);
1627
1628         return 0;
1629
1630         /*
1631          * Unwinding is complicated by the fact that we want -EIO to mean
1632          * "disable GPU submission but keep KMS alive". We want to mark the
1633          * HW as irreversibly wedged, but keep enough state around that the
1634          * driver doesn't explode during runtime.
1635          */
1636 err_gt:
1637         mutex_unlock(&dev_priv->drm.struct_mutex);
1638
1639         i915_gem_set_wedged(dev_priv);
1640         i915_gem_suspend(dev_priv);
1641         i915_gem_suspend_late(dev_priv);
1642
1643         i915_gem_drain_workqueue(dev_priv);
1644
1645         mutex_lock(&dev_priv->drm.struct_mutex);
1646 err_init_hw:
1647         intel_uc_fini_hw(dev_priv);
1648 err_uc_init:
1649         intel_uc_fini(dev_priv);
1650 err_pm:
1651         if (ret != -EIO) {
1652                 intel_cleanup_gt_powersave(dev_priv);
1653                 intel_engines_cleanup(dev_priv);
1654         }
1655 err_context:
1656         if (ret != -EIO)
1657                 i915_gem_contexts_fini(dev_priv);
1658 err_scratch:
1659         i915_gem_fini_scratch(dev_priv);
1660 err_ggtt:
1661 err_unlock:
1662         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1663         mutex_unlock(&dev_priv->drm.struct_mutex);
1664
1665 err_uc_misc:
1666         intel_uc_fini_misc(dev_priv);
1667
1668         if (ret != -EIO) {
1669                 i915_gem_cleanup_userptr(dev_priv);
1670                 i915_timelines_fini(dev_priv);
1671         }
1672
1673         if (ret == -EIO) {
1674                 mutex_lock(&dev_priv->drm.struct_mutex);
1675
1676                 /*
1677                  * Allow engine initialisation to fail by marking the GPU as
1678                  * wedged. But we only want to do this when the GPU is angry;
1679                  * for any other failure, such as an allocation failure, we bail.
1680                  */
1681                 if (!i915_reset_failed(dev_priv)) {
1682                         i915_load_error(dev_priv,
1683                                         "Failed to initialize GPU, declaring it wedged!\n");
1684                         i915_gem_set_wedged(dev_priv);
1685                 }
1686
1687                 /* Minimal basic recovery for KMS */
1688                 ret = i915_ggtt_enable_hw(dev_priv);
1689                 i915_gem_restore_gtt_mappings(dev_priv);
1690                 i915_gem_restore_fences(dev_priv);
1691                 intel_init_clock_gating(dev_priv);
1692
1693                 mutex_unlock(&dev_priv->drm.struct_mutex);
1694         }
1695
1696         i915_gem_drain_freed_objects(dev_priv);
1697         return ret;
1698 }
1699
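/*
 * Hardware-facing half of GEM teardown, called once the GT has been
 * parked (note the GEM_BUG_ON below): flush outstanding work and release
 * the uC firmware state, leaving the remaining software state to
 * i915_gem_fini().
 */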
1700 void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
1701 {
1702         GEM_BUG_ON(dev_priv->gt.awake);
1703
1704         intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1705
1706         i915_gem_suspend_late(dev_priv);
1707         intel_disable_gt_powersave(dev_priv);
1708
1709         /* Flush any outstanding unpin_work. */
1710         i915_gem_drain_workqueue(dev_priv);
1711
1712         mutex_lock(&dev_priv->drm.struct_mutex);
1713         intel_uc_fini_hw(dev_priv);
1714         intel_uc_fini(dev_priv);
1715         mutex_unlock(&dev_priv->drm.struct_mutex);
1716
1717         i915_gem_drain_freed_objects(dev_priv);
1718 }
1719
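/*
 * Second half of GEM teardown: release the remaining software state
 * (engines, contexts, scratch, workaround lists, timelines) created by
 * i915_gem_init().
 */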
1720 void i915_gem_fini(struct drm_i915_private *dev_priv)
1721 {
1722         mutex_lock(&dev_priv->drm.struct_mutex);
1723         intel_engines_cleanup(dev_priv);
1724         i915_gem_contexts_fini(dev_priv);
1725         i915_gem_fini_scratch(dev_priv);
1726         mutex_unlock(&dev_priv->drm.struct_mutex);
1727
1728         intel_wa_list_free(&dev_priv->gt_wa_list);
1729
1730         intel_cleanup_gt_powersave(dev_priv);
1731
1732         intel_uc_fini_misc(dev_priv);
1733         i915_gem_cleanup_userptr(dev_priv);
1734         i915_timelines_fini(dev_priv);
1735
1736         i915_gem_drain_freed_objects(dev_priv);
1737
1738         WARN_ON(!list_empty(&dev_priv->contexts.list));
1739 }
1740
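/*
 * Runs i915_gem_sanitize() to put the GPU back into a known state; the
 * name suggests this is hooked in once MMIO access is available during
 * driver load, before GEM starts using the hardware.
 */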
1741 void i915_gem_init_mmio(struct drm_i915_private *i915)
1742 {
1743         i915_gem_sanitize(i915);
1744 }
1745
1746 static void i915_gem_init__mm(struct drm_i915_private *i915)
1747 {
1748         spin_lock_init(&i915->mm.obj_lock);
1749         spin_lock_init(&i915->mm.free_lock);
1750
1751         init_llist_head(&i915->mm.free_list);
1752
1753         INIT_LIST_HEAD(&i915->mm.purge_list);
1754         INIT_LIST_HEAD(&i915->mm.shrink_list);
1755
1756         i915_gem_init__objects(i915);
1757 }
1758
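/*
 * Software-only initialisation, run early in driver load before any
 * hardware is touched: locks, lists, wait queues and the private tmpfs
 * mount used for huge-page backed objects.
 */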
1759 int i915_gem_init_early(struct drm_i915_private *dev_priv)
1760 {
1761         int err;
1762
1763         intel_gt_pm_init(dev_priv);
1764
1765         INIT_LIST_HEAD(&dev_priv->gt.active_rings);
1766         INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
1767         spin_lock_init(&dev_priv->gt.closed_lock);
1768
1769         i915_gem_init__mm(dev_priv);
1770         i915_gem_init__pm(dev_priv);
1771
1772         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
1773         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
1774         mutex_init(&dev_priv->gpu_error.wedge_mutex);
1775         init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
1776
1777         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
1778
1779         spin_lock_init(&dev_priv->fb_tracking.lock);
1780
1781         err = i915_gemfs_init(dev_priv);
1782         if (err)
1783                 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
1784
1785         return 0;
1786 }
1787
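/*
 * Counterpart of i915_gem_init_early(): by this point every object must
 * already have been freed, which the GEM_BUG_ONs below assert.
 */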
1788 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1789 {
1790         i915_gem_drain_freed_objects(dev_priv);
1791         GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1792         GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1793         WARN_ON(dev_priv->mm.shrink_count);
1794
1795         cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
1796
1797         i915_gemfs_fini(dev_priv);
1798 }
1799
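/*
 * Hibernation support is split in two: i915_gem_freeze() drops all
 * purgeable objects early on, and i915_gem_freeze_late() below moves
 * whatever survives into the CPU domain just before the hibernation
 * image is written.
 */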
1800 int i915_gem_freeze(struct drm_i915_private *dev_priv)
1801 {
1802         /* Discard all purgeable objects; let userspace recover those as
1803          * required after resuming.
1804          */
1805         i915_gem_shrink_all(dev_priv);
1806
1807         return 0;
1808 }
1809
1810 int i915_gem_freeze_late(struct drm_i915_private *i915)
1811 {
1812         struct drm_i915_gem_object *obj;
1813         intel_wakeref_t wakeref;
1814
1815         /*
1816          * Called just before we write the hibernation image.
1817          *
1818          * We need to update the domain tracking to reflect that the CPU
1819          * will be accessing all the pages to create and restore from the
1820          * hibernation, and so upon restoration those pages will be in the
1821          * CPU domain.
1822          *
1823          * To make sure the hibernation image contains the latest state,
1824          * we update that state just before writing out the image.
1825          *
1826          * To try to reduce the hibernation image, we manually shrink
1827          * the objects as well; see i915_gem_freeze().
1828          */
1829
1830         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1831
1832         i915_gem_shrink(i915, -1UL, NULL, ~0);
1833         i915_gem_drain_freed_objects(i915);
1834
1835         list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1836                 i915_gem_object_lock(obj);
1837                 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1838                 i915_gem_object_unlock(obj);
1839         }
1840
1841         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1842
1843         return 0;
1844 }
1845
1846 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
1847 {
1848         struct drm_i915_file_private *file_priv = file->driver_priv;
1849         struct i915_request *request;
1850
1851         /* Clean up our request list when the client is going away, so that
1852          * later retire_requests won't dereference our soon-to-be-gone
1853          * file_priv.
1854          */
1855         spin_lock(&file_priv->mm.lock);
1856         list_for_each_entry(request, &file_priv->mm.request_list, client_link)
1857                 request->file_priv = NULL;
1858         spin_unlock(&file_priv->mm.lock);
1859 }
1860
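/*
 * Per-client (struct drm_file) initialisation: allocate our private
 * bookkeeping and open the default GEM context for this client.
 */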
1861 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1862 {
1863         struct drm_i915_file_private *file_priv;
1864         int ret;
1865
1866         DRM_DEBUG("\n");
1867
1868         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1869         if (!file_priv)
1870                 return -ENOMEM;
1871
1872         file->driver_priv = file_priv;
1873         file_priv->dev_priv = i915;
1874         file_priv->file = file;
1875
1876         spin_lock_init(&file_priv->mm.lock);
1877         INIT_LIST_HEAD(&file_priv->mm.request_list);
1878
1879         file_priv->bsd_engine = -1;
1880         file_priv->hang_timestamp = jiffies;
1881
1882         ret = i915_gem_context_open(i915, file);
1883         if (ret)
1884                 kfree(file_priv);
1885
1886         return ret;
1887 }
1888
1889 /**
1890  * i915_gem_track_fb - update frontbuffer tracking
1891  * @old: current GEM buffer for the frontbuffer slots
1892  * @new: new GEM buffer for the frontbuffer slots
1893  * @frontbuffer_bits: bitmask of frontbuffer slots
1894  *
1895  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
1896  * from @old and setting them in @new. Both @old and @new can be NULL.
1897  */
1898 void i915_gem_track_fb(struct drm_i915_gem_object *old,
1899                        struct drm_i915_gem_object *new,
1900                        unsigned frontbuffer_bits)
1901 {
1902         /* Control of individual bits within the mask is guarded by
1903          * the owning plane->mutex, i.e. we can never see concurrent
1904          * manipulation of individual bits. But since the bitfield as a whole
1905          * is updated using RMW, we need to use atomics in order to update
1906          * the bits.
1907          */
1908         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
1909                      BITS_PER_TYPE(atomic_t));
1910
1911         if (old) {
1912                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
1913                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
1914         }
1915
1916         if (new) {
1917                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
1918                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
1919         }
1920 }
1921
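/*
 * Illustrative example (not taken from this file), assuming the
 * INTEL_FRONTBUFFER(pipe, plane_id) helper: on a page flip the display
 * code might hand over a plane's frontbuffer bit with
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER(pipe, plane_id));
 *
 * clearing the bit on the outgoing object and setting it on the
 * incoming one.
 */
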
1922 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1923 #include "selftests/mock_gem_device.c"
1924 #include "selftests/i915_gem.c"
1925 #endif