/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

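/*
 * Fault injection for the load path: every i915_inject_load_failure() call
 * below marks one checkpoint. When the i915.inject_load_failure module
 * parameter is set to N, the N-th checkpoint reports failure, letting the
 * driver's error-unwind paths be exercised deliberately.
 */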
bool __i915_inject_load_failure(const char *func, int line)
{
        if (i915_load_fail_count >= i915.inject_load_failure)
                return false;

        if (++i915_load_fail_count == i915.inject_load_failure) {
                DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
                         i915.inject_load_failure, func, line);
                return true;
        }

        return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
                    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
              const char *fmt, ...)
{
        static bool shown_bug_once;
        struct device *kdev = dev_priv->drm.dev;
        bool is_error = level[1] <= KERN_ERR[1];
        bool is_debug = level[1] == KERN_DEBUG[1];
        struct va_format vaf;
        va_list args;

        if (is_debug && !(drm_debug & DRM_UT_DRIVER))
                return;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
                   __builtin_return_address(0), &vaf);

        if (is_error && !shown_bug_once) {
                dev_notice(kdev, "%s", FDO_BUG_MSG);
                shown_bug_once = true;
        }

        va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
        return i915.inject_load_failure &&
               i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)                                  \
        __i915_printk(dev_priv,                                              \
                      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
                      fmt, ##__VA_ARGS__)

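/*
 * i915_load_error() demotes a load-time error message to debug level when
 * the failure was injected on purpose (see __i915_inject_load_failure()),
 * so deliberately provoked failures do not show up in dmesg as real
 * errors. It is used by i915_driver_load() below.
 */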
static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
        enum intel_pch ret = PCH_NOP;

        /*
         * In a virtualized passthrough environment we can be in a
         * setup where the ISA bridge is not able to be passed through.
         * In this case, a south bridge can be emulated and we have to
         * make an educated guess as to which PCH is really there.
         */

        if (IS_GEN5(dev_priv)) {
                ret = PCH_IBX;
                DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
        } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
                ret = PCH_CPT;
                DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                ret = PCH_LPT;
                DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
        } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                ret = PCH_SPT;
                DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
        } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                ret = PCH_CNP;
                DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
        }

        return ret;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pch = NULL;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev_priv)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /*
         * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough work easy for VMMs, which only
         * need to expose an ISA bridge to let the driver know the real
         * hardware underneath. This is a requirement from the
         * virtualization team.
         *
         * In some virtualized environments (e.g. XEN), there may be an
         * irrelevant ISA bridge in the system. To work reliably, we should
         * scan through all the ISA bridge devices and check for the first
         * match, instead of only checking the first one.
         */
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        unsigned short id_ext = pch->device &
                                INTEL_PCH_DEVICE_ID_MASK_EXT;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev_priv));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev_priv) ||
                                        IS_IVYBRIDGE(dev_priv)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev_priv) ||
                                        IS_IVYBRIDGE(dev_priv)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev_priv) &&
                                        !IS_BROADWELL(dev_priv));
                                WARN_ON(IS_HSW_ULT(dev_priv) ||
                                        IS_BDW_ULT(dev_priv));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev_priv) &&
                                        !IS_BROADWELL(dev_priv));
                                WARN_ON(!IS_HSW_ULT(dev_priv) &&
                                        !IS_BDW_ULT(dev_priv));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev_priv) &&
                                        !IS_KABYLAKE(dev_priv));
                        } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id_ext;
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev_priv) &&
                                        !IS_KABYLAKE(dev_priv));
                        } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_KBP;
                                DRM_DEBUG_KMS("Found KabyPoint PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev_priv) &&
                                        !IS_KABYLAKE(dev_priv));
                        } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type = PCH_CNP;
                                DRM_DEBUG_KMS("Found CannonPoint PCH\n");
                                WARN_ON(!IS_CANNONLAKE(dev_priv) &&
                                        !IS_COFFEELAKE(dev_priv));
                        } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_id = id_ext;
                                dev_priv->pch_type = PCH_CNP;
                                DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
                                WARN_ON(!IS_CANNONLAKE(dev_priv) &&
                                        !IS_COFFEELAKE(dev_priv));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
                                   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
                                    pch->subsystem_vendor ==
                                            PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
                                    pch->subsystem_device ==
                                            PCI_SUBDEVICE_ID_QEMU)) {
                                dev_priv->pch_id = id;
                                dev_priv->pch_type =
                                        intel_virt_detect_pch(dev_priv);
                        } else
                                continue;

                        break;
                }
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");

        pci_dev_put(pch);
}

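/*
 * i915_getparam - DRM_IOCTL_I915_GETPARAM handler
 *
 * Reports one integer capability value per parameter to userspace.
 * Obsolete UMS/DRI1 parameters return -ENODEV, and unknown parameters
 * return -EINVAL.
 */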
static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
        drm_i915_getparam_t *param = data;
        int value;

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
                value = pdev->device;
                break;
        case I915_PARAM_REVISION:
                value = pdev->revision;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_BSD:
                value = !!dev_priv->engine[VCS];
                break;
        case I915_PARAM_HAS_BLT:
                value = !!dev_priv->engine[BCS];
                break;
        case I915_PARAM_HAS_VEBOX:
                value = !!dev_priv->engine[VECS];
                break;
        case I915_PARAM_HAS_BSD2:
                value = !!dev_priv->engine[VCS2];
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev_priv);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev_priv);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = USES_PPGTT(dev_priv);
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915.semaphores;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version(dev_priv);
                break;
        case I915_PARAM_SUBSLICE_TOTAL:
                value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
                value = INTEL_INFO(dev_priv)->sseu.eu_total;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_HAS_GPU_RESET:
                value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
                value = HAS_RESOURCE_STREAMER(dev_priv);
                break;
        case I915_PARAM_HAS_POOLED_EU:
                value = HAS_POOLED_EU(dev_priv);
                break;
        case I915_PARAM_MIN_EU_IN_POOL:
                value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
                break;
        case I915_PARAM_HUC_STATUS:
                intel_runtime_pm_get(dev_priv);
                value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
                intel_runtime_pm_put(dev_priv);
                break;
        case I915_PARAM_MMAP_GTT_VERSION:
                /* Though we've started our numbering from 1, and so class all
                 * earlier versions as 0, in effect their value is undefined as
                 * the ioctl will report EINVAL for the unknown param!
                 */
                value = i915_gem_mmap_gtt_version();
                break;
        case I915_PARAM_HAS_SCHEDULER:
                value = dev_priv->engine[RCS] &&
                        dev_priv->engine[RCS]->schedule;
                break;
        case I915_PARAM_MMAP_VERSION:
                /* Remember to bump this if the version changes! */
        case I915_PARAM_HAS_GEM:
        case I915_PARAM_HAS_PAGEFLIPPING:
        case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
        case I915_PARAM_HAS_RELAXED_FENCING:
        case I915_PARAM_HAS_COHERENT_RINGS:
        case I915_PARAM_HAS_RELAXED_DELTA:
        case I915_PARAM_HAS_GEN7_SOL_RESET:
        case I915_PARAM_HAS_WAIT_TIMEOUT:
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
        case I915_PARAM_HAS_PINNED_BATCHES:
        case I915_PARAM_HAS_EXEC_NO_RELOC:
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
        case I915_PARAM_HAS_EXEC_SOFTPIN:
        case I915_PARAM_HAS_EXEC_ASYNC:
        case I915_PARAM_HAS_EXEC_FENCE:
        case I915_PARAM_HAS_EXEC_CAPTURE:
        case I915_PARAM_HAS_EXEC_BATCH_FIRST:
                /* For the time being all of these are always true;
                 * if some supported hardware does not have one of these
                 * features this value needs to be provided from
                 * INTEL_INFO(), a feature macro, or similar.
                 */
                value = 1;
                break;
        case I915_PARAM_SLICE_MASK:
                value = INTEL_INFO(dev_priv)->sseu.slice_mask;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_SUBSLICE_MASK:
                value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
                if (!value)
                        return -ENODEV;
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        if (put_user(value, param->value))
                return -EFAULT;

        return 0;
}

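/*
 * Cache a reference to the host bridge (device 0, function 0 on bus 0);
 * the MCHBAR setup and teardown below poke its PCI config space.
 */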
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
        int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

        if (INTEL_GEN(dev_priv) >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_GEN(dev_priv) >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
        return 0;
}

/* Setup MCHBAR if possible; set dev_priv->mchbar_need_disable when we
 * should disable it again on teardown.
 */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
        int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev_priv))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
        int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                        u32 deven_val;

                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
                                              &deven_val);
                        deven_val &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
                                               deven_val);
                } else {
                        u32 mchbar_val;

                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
                                              &mchbar_val);
                        mchbar_val &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
                                               mchbar_val);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_i915_private *dev_priv = cookie;

        intel_modeset_vga_set_state(dev_priv, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

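/*
 * vga_switcheroo callbacks: the mux driver powers this GPU on or off by
 * routing through our regular suspend/resume paths.
 */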
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(pdev, PCI_D0);
                i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend_switcheroo(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .set_gpu_state = i915_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = i915_switcheroo_can_switch,
};

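/*
 * Tear down the GEM side of the driver: uC hardware state, engines,
 * contexts and userptr under struct_mutex, then drain objects whose
 * freeing was deferred to a worker.
 */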
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
        mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uc_fini_hw(dev_priv);
        i915_gem_cleanup_engines(dev_priv);
        i915_gem_context_fini(dev_priv);
        i915_gem_cleanup_userptr(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);

        i915_gem_drain_freed_objects(dev_priv);

        WARN_ON(!list_empty(&dev_priv->context_list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;

        if (i915_inject_load_failure())
                return -ENODEV;

        intel_bios_init(dev_priv);

        /* If we have more than one VGA card, then we need to arbitrate
         * access to the common VGA resources.
         *
         * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
        ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;

        /* must happen before intel_power_domains_init_hw() on VLV/CHV */
        intel_update_rawclk(dev_priv);

        intel_power_domains_init_hw(dev_priv, false);

        intel_csr_ucode_init(dev_priv);

        ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_csr;

        intel_setup_gmbus(dev_priv);

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        ret = intel_modeset_init(dev);
        if (ret)
                goto cleanup_irq;

        intel_uc_init_fw(dev_priv);

        ret = i915_gem_init(dev_priv);
        if (ret)
                goto cleanup_uc;

        intel_modeset_gem_init(dev);

        if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev_priv);

        drm_kms_helper_poll_init(dev);

        return 0;

cleanup_gem:
        if (i915_gem_suspend(dev_priv))
                DRM_ERROR("failed to idle hardware; continuing to unload!\n");
        i915_gem_fini(dev_priv);
cleanup_uc:
        intel_uc_fini_fw(dev_priv);
cleanup_irq:
        drm_irq_uninstall(dev);
        intel_teardown_gmbus(dev_priv);
cleanup_csr:
        intel_csr_ucode_fini(dev_priv);
        intel_power_domains_fini(dev_priv);
        vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
        vga_client_register(pdev, NULL, NULL, NULL);
out:
        return ret;
}

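/*
 * Evict generic firmware framebuffers (e.g. efifb, vesafb) that claim the
 * GGTT mappable aperture before this driver takes over the hardware.
 */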
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool primary;
        int ret;

        ap = alloc_apertures(1);
        if (!ap)
                return -ENOMEM;

        ap->ranges[0].base = ggtt->mappable_base;
        ap->ranges[0].size = ggtt->mappable_end;

        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

        ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

        kfree(ap);

        return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        DRM_INFO("Replacing VGA console driver\n");

        console_lock();
        if (con_is_bound(&vga_con))
                ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
        if (ret == 0) {
                ret = do_unregister_con_driver(&vga_con);

                /* Ignore "already unregistered". */
                if (ret == -ENODEV)
                        ret = 0;
        }
        console_unlock();

        return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
        /*
         * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
         * CHV x1 PHY (DP/HDMI D)
         * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
         */
        if (IS_CHERRYVIEW(dev_priv)) {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
                DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
        }
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
        /*
         * The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time.  Use an ordered one.
         */
        dev_priv->wq = alloc_ordered_workqueue("i915", 0);
        if (dev_priv->wq == NULL)
                goto out_err;

        dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
        if (dev_priv->hotplug.dp_wq == NULL)
                goto out_free_wq;

        return 0;

out_free_wq:
        destroy_workqueue(dev_priv->wq);
out_err:
        DRM_ERROR("Failed to allocate workqueues.\n");

        return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id)
                kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
        destroy_workqueue(dev_priv->hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
        bool pre = false;

        pre |= IS_HSW_EARLY_SDV(dev_priv);
        pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
        pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);

        if (pre) {
                DRM_ERROR("This is a pre-production stepping. "
                          "It may not be fully functional.\n");
                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
        }
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
                                  const struct pci_device_id *ent)
{
        const struct intel_device_info *match_info =
                (struct intel_device_info *)ent->driver_data;
        struct intel_device_info *device_info;
        int ret = 0;

        if (i915_inject_load_failure())
                return -ENODEV;

        /* Setup the write-once "constant" device info */
        device_info = mkwrite_device_info(dev_priv);
        memcpy(device_info, match_info, sizeof(*device_info));
        device_info->device_id = dev_priv->drm.pdev->device;

        BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
        device_info->gen_mask = BIT(device_info->gen - 1);

        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);

        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
        mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->av_mutex);
        mutex_init(&dev_priv->wm.wm_mutex);
        mutex_init(&dev_priv->pps_mutex);

        intel_uc_init_early(dev_priv);
        i915_memcpy_init_early(dev_priv);

        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
                goto err_engines;

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev_priv);

        intel_pm_setup(dev_priv);
        intel_init_dpio(dev_priv);
        intel_power_domains_init(dev_priv);
        intel_irq_init(dev_priv);
        intel_hangcheck_init(dev_priv);
        intel_init_display_hooks(dev_priv);
        intel_init_clock_gating_hooks(dev_priv);
        intel_init_audio_hooks(dev_priv);
        ret = i915_gem_load_init(dev_priv);
        if (ret < 0)
                goto err_irq;

        intel_display_crc_init(dev_priv);

        intel_device_info_dump(dev_priv);

        intel_detect_preproduction_hw(dev_priv);

        i915_perf_init(dev_priv);

        return 0;

err_irq:
        intel_irq_fini(dev_priv);
        i915_workqueues_cleanup(dev_priv);
err_engines:
        i915_engines_cleanup(dev_priv);
        return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
        i915_perf_fini(dev_priv);
        i915_gem_load_cleanup(dev_priv);
        intel_irq_fini(dev_priv);
        i915_workqueues_cleanup(dev_priv);
        i915_engines_cleanup(dev_priv);
}

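/*
 * Map the MMIO register BAR (BAR1 on gen2, BAR0 otherwise) and enable
 * MCHBAR access before any register reads or writes are attempted.
 */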
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int mmio_bar;
        int mmio_size;

        mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
        /*
         * Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
         * in the same BAR, so we restrict this ioremap to avoid clobbering
         * the GTT, which we map with ioremap_wc instead. Fortunately, the
         * register BAR remains the same size for all the earlier
         * generations up to Ironlake.
         */
        if (INTEL_GEN(dev_priv) < 5)
                mmio_size = 512 * 1024;
        else
                mmio_size = 2 * 1024 * 1024;
        dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
        if (dev_priv->regs == NULL) {
                DRM_ERROR("failed to map registers\n");

                return -EIO;
        }

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev_priv);

        return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;

        intel_teardown_mchbar(dev_priv);
        pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
        int ret;

        if (i915_inject_load_failure())
                return -ENODEV;

        if (i915_get_bridge_dev(dev_priv))
                return -EIO;

        ret = i915_mmio_setup(dev_priv);
        if (ret < 0)
                goto err_bridge;

        intel_uncore_init(dev_priv);

        ret = intel_engines_init_mmio(dev_priv);
        if (ret)
                goto err_uncore;

        i915_gem_init_mmio(dev_priv);

        return 0;

err_uncore:
        intel_uncore_fini(dev_priv);
err_bridge:
        pci_dev_put(dev_priv->bridge_dev);

        return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
        intel_uncore_fini(dev_priv);
        i915_mmio_cleanup(dev_priv);
        pci_dev_put(dev_priv->bridge_dev);
}

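/*
 * Clamp the user-supplied i915.* module parameters to what this device and
 * driver configuration actually support, so that later code can use the
 * sanitized values without re-checking them.
 */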
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
        i915.enable_execlists =
                intel_sanitize_enable_execlists(dev_priv,
                                                i915.enable_execlists);

        /*
         * i915.enable_ppgtt is read-only, so do an early pass to validate the
         * user's requested state against the hardware/driver capabilities.  We
         * do this now so that we can print out any log messages once rather
         * than every time we check intel_enable_ppgtt().
         */
        i915.enable_ppgtt =
                intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
        DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

        i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
        DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));

        intel_uc_sanitize_options(dev_priv);

        intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;

        if (i915_inject_load_failure())
                return -ENODEV;

        intel_device_info_runtime_init(dev_priv);

        intel_sanitize_options(dev_priv);

        ret = i915_ggtt_probe_hw(dev_priv);
        if (ret)
                return ret;

        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
        ret = i915_kick_out_firmware_fb(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
                goto out_ggtt;
        }

        ret = i915_kick_out_vgacon(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting VGA console\n");
                goto out_ggtt;
        }

        ret = i915_ggtt_init_hw(dev_priv);
        if (ret)
                return ret;

        ret = i915_ggtt_enable_hw(dev_priv);
        if (ret) {
                DRM_ERROR("failed to enable GGTT\n");
                goto out_ggtt;
        }

        pci_set_master(pdev);

        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev_priv)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");

                        goto out_ggtt;
                }
        }

        /* 965GM sometimes incorrectly writes to hardware status page (HWS)
         * using 32bit addressing, overwriting memory if HWS is located
         * above 4GB.
         *
         * The documentation also mentions an issue with undefined
         * behaviour if any general state is accessed within a page above 4GB,
         * which also needs to be handled carefully.
         */
        if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");

                        goto out_ggtt;
                }
        }

        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);

        intel_uncore_sanitize(dev_priv);

        intel_opregion_setup(dev_priv);

        i915_gem_load_init_fences(dev_priv);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, and MSI support was defeatured. MSI interrupts
         * seem to get lost on g4x as well, and interrupt delivery seems to
         * stay properly dead afterwards. So we'll just disable them for all
         * pre-gen5 chipsets.
         */
        if (INTEL_GEN(dev_priv) >= 5) {
                if (pci_enable_msi(pdev) < 0)
                        DRM_DEBUG_DRIVER("can't enable MSI");
        }

        ret = intel_gvt_init(dev_priv);
        if (ret)
                goto out_ggtt;

        return 0;

out_ggtt:
        i915_ggtt_cleanup_hw(dev_priv);

        return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;

        if (pdev->msi_enabled)
                pci_disable_msi(pdev);

        pm_qos_remove_request(&dev_priv->pm_qos);
        i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;

        i915_gem_shrinker_init(dev_priv);

        /*
         * Notify a valid surface after modesetting,
         * when running inside a VM.
         */
        if (intel_vgpu_active(dev_priv))
                I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

        /* Reveal our presence to userspace */
        if (drm_dev_register(dev, 0) == 0) {
                i915_debugfs_register(dev_priv);
                i915_guc_log_register(dev_priv);
                i915_setup_sysfs(dev_priv);

                /* Depends on sysfs having been initialized */
                i915_perf_register(dev_priv);
        } else
                DRM_ERROR("Failed to register driver for userspace access!\n");

        if (INTEL_INFO(dev_priv)->num_pipes) {
                /* Must be done after probing outputs */
                intel_opregion_register(dev_priv);
                acpi_video_register();
        }

        if (IS_GEN5(dev_priv))
                intel_gpu_ips_init(dev_priv);

        intel_audio_init(dev_priv);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
         * irqs are fully enabled. We do it last so that the async config
         * cannot run before the connectors are registered.
         */
        intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
        intel_audio_deinit(dev_priv);

        intel_gpu_ips_teardown();
        acpi_video_unregister();
        intel_opregion_unregister(dev_priv);

        i915_perf_unregister(dev_priv);

        i915_teardown_sysfs(dev_priv);
        i915_guc_log_unregister(dev_priv);
        drm_dev_unregister(&dev_priv->drm);

        i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        const struct intel_device_info *match_info =
                (struct intel_device_info *)ent->driver_data;
        struct drm_i915_private *dev_priv;
        int ret;

        /* Enable nuclear pageflip on ILK+ */
        if (!i915.nuclear_pageflip && match_info->gen < 5)
                driver.driver_features &= ~DRIVER_ATOMIC;

        ret = -ENOMEM;
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv)
                ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
        if (ret) {
                DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
                goto out_free;
        }

        dev_priv->drm.pdev = pdev;
        dev_priv->drm.dev_private = dev_priv;

        ret = pci_enable_device(pdev);
        if (ret)
                goto out_fini;

        pci_set_drvdata(pdev, &dev_priv->drm);
        /*
         * Disable the system suspend direct complete optimization, which can
         * leave the device suspended skipping the driver's suspend handlers
         * if the device was already runtime suspended. This is needed due to
         * the difference in our runtime and system suspend sequence and
         * because the HDA driver may require us to enable the audio power
         * domain during system suspend.
         */
        pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;

        ret = i915_driver_init_early(dev_priv, ent);
        if (ret < 0)
                goto out_pci_disable;

        intel_runtime_pm_get(dev_priv);

        ret = i915_driver_init_mmio(dev_priv);
        if (ret < 0)
                goto out_runtime_pm_put;

        ret = i915_driver_init_hw(dev_priv);
        if (ret < 0)
                goto out_cleanup_mmio;

        /*
         * TODO: move the vblank init and parts of modeset init steps into one
         * of the i915_driver_init_/i915_driver_register functions according
         * to the role/effect of the given init step.
         */
        if (INTEL_INFO(dev_priv)->num_pipes) {
                ret = drm_vblank_init(&dev_priv->drm,
                                      INTEL_INFO(dev_priv)->num_pipes);
                if (ret)
                        goto out_cleanup_hw;
        }

        ret = i915_load_modeset_init(&dev_priv->drm);
        if (ret < 0)
                goto out_cleanup_vblank;

        i915_driver_register(dev_priv);

        intel_runtime_pm_enable(dev_priv);

        dev_priv->ipc_enabled = false;

        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                DRM_INFO("DRM_I915_DEBUG enabled\n");
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");

        intel_runtime_pm_put(dev_priv);

        return 0;

out_cleanup_vblank:
        drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
        i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
        i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
        intel_runtime_pm_put(dev_priv);
        i915_driver_cleanup_early(dev_priv);
out_pci_disable:
        pci_disable_device(pdev);
out_fini:
        i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
        drm_dev_fini(&dev_priv->drm);
out_free:
        kfree(dev_priv);
        return ret;
}

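/*
 * Note the "onion" structure: the i915_driver_init_*() stages above are
 * unwound in strict reverse order by the error labels in i915_driver_load(),
 * and i915_driver_unload() below mirrors the same reverse ordering for a
 * normal teardown.
 */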
void i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;

        intel_fbdev_fini(dev);

        if (i915_gem_suspend(dev_priv))
                DRM_ERROR("failed to idle hardware; continuing to unload!\n");

        intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        drm_atomic_helper_shutdown(dev);

        intel_gvt_cleanup(dev_priv);

        i915_driver_unregister(dev_priv);

        drm_vblank_cleanup(dev);

        intel_modeset_cleanup(dev);

        /*
         * free the memory space allocated for the child device
         * config parsed from VBT
         */
        if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
                kfree(dev_priv->vbt.child_dev);
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }
        kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

        vga_switcheroo_unregister_client(pdev);
        vga_client_register(pdev, NULL, NULL, NULL);

        intel_csr_ucode_fini(dev_priv);

        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_reset_error_state(dev_priv);

        /* Flush any outstanding unpin_work. */
        drain_workqueue(dev_priv->wq);

        i915_gem_fini(dev_priv);
        intel_uc_fini_fw(dev_priv);
        intel_fbc_cleanup_cfb(dev_priv);

        intel_power_domains_fini(dev_priv);

        i915_driver_cleanup_hw(dev_priv);
        i915_driver_cleanup_mmio(dev_priv);

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}

static void i915_driver_release(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        i915_driver_cleanup_early(dev_priv);
        drm_dev_fini(&dev_priv->drm);

        kfree(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;

        ret = i915_gem_open(dev, file);
        if (ret)
                return ret;

        return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
        intel_fbdev_restore_mode(dev);
        vga_switcheroo_process_delayed_switch();
}

1457 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1458 {
1459         struct drm_i915_file_private *file_priv = file->driver_priv;
1460
1461         mutex_lock(&dev->struct_mutex);
1462         i915_gem_context_close(dev, file);
1463         i915_gem_release(dev, file);
1464         mutex_unlock(&dev->struct_mutex);
1465
1466         kfree(file_priv);
1467 }
1468
1469 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1470 {
1471         struct drm_device *dev = &dev_priv->drm;
1472         struct intel_encoder *encoder;
1473
1474         drm_modeset_lock_all(dev);
1475         for_each_intel_encoder(dev, encoder)
1476                 if (encoder->suspend)
1477                         encoder->suspend(encoder);
1478         drm_modeset_unlock_all(dev);
1479 }
1480
1481 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1482                               bool rpm_resume);
1483 static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
1484
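/*
 * Returns true when the ACPI target sleep state is shallower than S3,
 * i.e. we are suspending to idle rather than doing a full S3 suspend.
 * Without CONFIG_ACPI_SLEEP the target state cannot be queried, so a
 * full S3 suspend is assumed.
 */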
1485 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1486 {
1487 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
1488         if (acpi_target_system_state() < ACPI_STATE_S3)
1489                 return true;
1490 #endif
1491         return false;
1492 }
1493
1494 static int i915_drm_suspend(struct drm_device *dev)
1495 {
1496         struct drm_i915_private *dev_priv = to_i915(dev);
1497         struct pci_dev *pdev = dev_priv->drm.pdev;
1498         pci_power_t opregion_target_state;
1499         int error;
1500
1501         /* ignore lid events during suspend */
1502         mutex_lock(&dev_priv->modeset_restore_lock);
1503         dev_priv->modeset_restore = MODESET_SUSPENDED;
1504         mutex_unlock(&dev_priv->modeset_restore_lock);
1505
1506         disable_rpm_wakeref_asserts(dev_priv);
1507
1508         /* We do a lot of poking in a lot of registers, so make sure they
1509          * work properly. */
1510         intel_display_set_init_power(dev_priv, true);
1511
1512         drm_kms_helper_poll_disable(dev);
1513
1514         pci_save_state(pdev);
1515
1516         error = i915_gem_suspend(dev_priv);
1517         if (error) {
1518                 dev_err(&pdev->dev,
1519                         "GEM idle failed, resume might fail\n");
1520                 goto out;
1521         }
1522
1523         intel_display_suspend(dev);
1524
1525         intel_dp_mst_suspend(dev);
1526
1527         intel_runtime_pm_disable_interrupts(dev_priv);
1528         intel_hpd_cancel_work(dev_priv);
1529
1530         intel_suspend_encoders(dev_priv);
1531
1532         intel_suspend_hw(dev_priv);
1533
1534         i915_gem_suspend_gtt_mappings(dev_priv);
1535
1536         i915_save_state(dev_priv);
1537
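        /*
         * PCI_D1 rather than a D3 state is reported for suspend-to-idle;
         * firmware reportedly repurposes the D1 opregion notification to
         * mean a shallower suspend (compare the D1 note in
         * intel_runtime_suspend() below).
         */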
1538         opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1539         intel_opregion_notify_adapter(dev_priv, opregion_target_state);
1540
1541         intel_uncore_suspend(dev_priv);
1542         intel_opregion_unregister(dev_priv);
1543
1544         intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1545
1546         dev_priv->suspend_count++;
1547
1548         intel_csr_ucode_suspend(dev_priv);
1549
1550 out:
1551         enable_rpm_wakeref_asserts(dev_priv);
1552
1553         return error;
1554 }
1555
1556 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1557 {
1558         struct drm_i915_private *dev_priv = to_i915(dev);
1559         struct pci_dev *pdev = dev_priv->drm.pdev;
1560         bool fw_csr;
1561         int ret;
1562
1563         disable_rpm_wakeref_asserts(dev_priv);
1564
1565         intel_display_set_init_power(dev_priv, false);
1566
1567         fw_csr = !IS_GEN9_LP(dev_priv) &&
1568                 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
1569         /*
1570          * In case of firmware assisted context save/restore don't manually
1571          * deinit the power domains. This also means the CSR/DMC firmware will
1572          * stay active, it will power down any HW resources as required and
1573          * also enable deeper system power states that would be blocked if the
1574          * firmware was inactive.
1575          */
1576         if (!fw_csr)
1577                 intel_power_domains_suspend(dev_priv);
1578
1579         ret = 0;
1580         if (IS_GEN9_LP(dev_priv))
1581                 bxt_enable_dc9(dev_priv);
1582         else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1583                 hsw_enable_pc8(dev_priv);
1584         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1585                 ret = vlv_suspend_complete(dev_priv);
1586
1587         if (ret) {
1588                 DRM_ERROR("Suspend complete failed: %d\n", ret);
1589                 if (!fw_csr)
1590                         intel_power_domains_init_hw(dev_priv, true);
1591
1592                 goto out;
1593         }
1594
1595         pci_disable_device(pdev);
1596         /*
1597          * During hibernation on some platforms the BIOS may try to access
1598          * the device even though it's already in D3 and hang the machine. So
1599          * leave the device in D0 on those platforms and hope the BIOS will
1600          * power down the device properly. The issue was seen on multiple old
1601          * GENs with different BIOS vendors, so having an explicit blacklist
1602          * is impractical; apply the workaround on everything pre GEN6. The
1603          * platforms where the issue was seen:
1604          * Lenovo Thinkpad X301, X61s, X60, T60, X41
1605          * Fujitsu FSC S7110
1606          * Acer Aspire 1830T
1607          */
1608         if (!(hibernation && INTEL_GEN(dev_priv) < 6))
1609                 pci_set_power_state(pdev, PCI_D3hot);
1610
1611         dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
1612
1613 out:
1614         enable_rpm_wakeref_asserts(dev_priv);
1615
1616         return ret;
1617 }
1618
1619 static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
1620 {
1621         int error;
1622
1623         if (!dev) {
1624                 DRM_ERROR("dev: %p\n", dev);
1625                 DRM_ERROR("DRM not initialized, aborting suspend.\n");
1626                 return -ENODEV;
1627         }
1628
1629         if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
1630                          state.event != PM_EVENT_FREEZE))
1631                 return -EINVAL;
1632
1633         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1634                 return 0;
1635
1636         error = i915_drm_suspend(dev);
1637         if (error)
1638                 return error;
1639
1640         return i915_drm_suspend_late(dev, false);
1641 }
1642
1643 static int i915_drm_resume(struct drm_device *dev)
1644 {
1645         struct drm_i915_private *dev_priv = to_i915(dev);
1646         int ret;
1647
1648         disable_rpm_wakeref_asserts(dev_priv);
1649         intel_sanitize_gt_powersave(dev_priv);
1650
1651         ret = i915_ggtt_enable_hw(dev_priv);
1652         if (ret)
1653                 DRM_ERROR("failed to re-enable GGTT\n");
1654
1655         intel_csr_ucode_resume(dev_priv);
1656
1657         i915_gem_resume(dev_priv);
1658
1659         i915_restore_state(dev_priv);
1660         intel_pps_unlock_regs_wa(dev_priv);
1661         intel_opregion_setup(dev_priv);
1662
1663         intel_init_pch_refclk(dev_priv);
1664
1665         /*
1666          * Interrupts have to be enabled before any batches are run. If not the
1667          * GPU will hang. i915_gem_init_hw() will initiate batches to
1668          * update/restore the context.
1669          *
1670          * drm_mode_config_reset() needs AUX interrupts.
1671          *
1672          * Modeset enabling in intel_modeset_init_hw() also needs working
1673          * interrupts.
1674          */
1675         intel_runtime_pm_enable_interrupts(dev_priv);
1676
1677         drm_mode_config_reset(dev);
1678
1679         mutex_lock(&dev->struct_mutex);
1680         if (i915_gem_init_hw(dev_priv)) {
1681                 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
1682                 i915_gem_set_wedged(dev_priv);
1683         }
1684         mutex_unlock(&dev->struct_mutex);
1685
1686         intel_guc_resume(dev_priv);
1687
1688         intel_modeset_init_hw(dev);
1689
1690         spin_lock_irq(&dev_priv->irq_lock);
1691         if (dev_priv->display.hpd_irq_setup)
1692                 dev_priv->display.hpd_irq_setup(dev_priv);
1693         spin_unlock_irq(&dev_priv->irq_lock);
1694
1695         intel_dp_mst_resume(dev);
1696
1697         intel_display_resume(dev);
1698
1699         drm_kms_helper_poll_enable(dev);
1700
1701         /*
1702          * ... but also need to make sure that hotplug processing
1703          * doesn't cause havoc. Like in the driver load code we don't
1704          * bother with the tiny race here where we might lose hotplug
1705          * notifications.
1706          */
1707         intel_hpd_init(dev_priv);
1708
1709         intel_opregion_register(dev_priv);
1710
1711         intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1712
1713         mutex_lock(&dev_priv->modeset_restore_lock);
1714         dev_priv->modeset_restore = MODESET_DONE;
1715         mutex_unlock(&dev_priv->modeset_restore_lock);
1716
1717         intel_opregion_notify_adapter(dev_priv, PCI_D0);
1718
1719         intel_autoenable_gt_powersave(dev_priv);
1720
1721         enable_rpm_wakeref_asserts(dev_priv);
1722
1723         return 0;
1724 }
1725
1726 static int i915_drm_resume_early(struct drm_device *dev)
1727 {
1728         struct drm_i915_private *dev_priv = to_i915(dev);
1729         struct pci_dev *pdev = dev_priv->drm.pdev;
1730         int ret;
1731
1732         /*
1733          * We have a resume ordering issue with the snd-hda driver also
1734          * requiring our device to be powered up. Due to the lack of a
1735          * parent/child relationship we currently solve this with an early
1736          * resume hook.
1737          *
1738          * FIXME: This should be solved with a special hdmi sink device or
1739          * similar so that power domains can be employed.
1740          */
1741
1742         /*
1743          * Note that we need to set the power state explicitly, since we
1744          * powered off the device during freeze and the PCI core won't power
1745          * it back up for us during thaw. Powering off the device during
1746          * freeze is not a hard requirement though, and during the
1747          * suspend/resume phases the PCI core makes sure we get here with the
1748          * device powered on. So in case we change our freeze logic and keep
1749          * the device powered we can also remove the following set power state
1750          * call.
1751          */
1752         ret = pci_set_power_state(pdev, PCI_D0);
1753         if (ret) {
1754                 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
1755                 goto out;
1756         }
1757
1758         /*
1759          * Note that pci_enable_device() first enables any parent bridge
1760          * device and only then sets the power state for this device. The
1761          * bridge enabling is a nop though, since bridge devices are resumed
1762          * first. The order of enabling power and enabling the device is
1763          * imposed by the PCI core as described above, so here we preserve the
1764          * same order for the freeze/thaw phases.
1765          *
1766          * TODO: eventually we should remove pci_disable_device() /
1767          * pci_enable_device() from suspend/resume. Due to how they
1768          * depend on the device enable refcount we can't anyway depend on them
1769          * disabling/enabling the device.
1770          */
1771         if (pci_enable_device(pdev)) {
1772                 ret = -EIO;
1773                 goto out;
1774         }
1775
1776         pci_set_master(pdev);
1777
1778         disable_rpm_wakeref_asserts(dev_priv);
1779
1780         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1781                 ret = vlv_resume_prepare(dev_priv, false);
1782         if (ret)
1783                 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
1784                           ret);
1785
1786         intel_uncore_resume_early(dev_priv);
1787
1788         if (IS_GEN9_LP(dev_priv)) {
1789                 if (!dev_priv->suspended_to_idle)
1790                         gen9_sanitize_dc_state(dev_priv);
1791                 bxt_disable_dc9(dev_priv);
1792         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1793                 hsw_disable_pc8(dev_priv);
1794         }
1795
1796         intel_uncore_sanitize(dev_priv);
1797
1798         if (IS_GEN9_LP(dev_priv) ||
1799             !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
1800                 intel_power_domains_init_hw(dev_priv, true);
1801
1802         i915_gem_sanitize(dev_priv);
1803
1804         enable_rpm_wakeref_asserts(dev_priv);
1805
1806 out:
1807         dev_priv->suspended_to_idle = false;
1808
1809         return ret;
1810 }
1811
1812 static int i915_resume_switcheroo(struct drm_device *dev)
1813 {
1814         int ret;
1815
1816         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1817                 return 0;
1818
1819         ret = i915_drm_resume_early(dev);
1820         if (ret)
1821                 return ret;
1822
1823         return i915_drm_resume(dev);
1824 }
1825
1826 /**
1827  * i915_reset - reset chip after a hang
1828  * @dev_priv: device private to reset
1829  *
1830  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
1831  * on failure.
1832  *
1833  * Caller must hold the struct_mutex.
1834  *
1835  * Procedure is fairly simple:
1836  *   - reset the chip using the reset reg
1837  *   - re-init context state
1838  *   - re-init hardware status page
1839  *   - re-init ring buffer
1840  *   - re-init interrupt state
1841  *   - re-init display
1842  */
1843 void i915_reset(struct drm_i915_private *dev_priv)
1844 {
1845         struct i915_gpu_error *error = &dev_priv->gpu_error;
1846         int ret;
1847
1848         lockdep_assert_held(&dev_priv->drm.struct_mutex);
1849         GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
1850
1851         if (!test_bit(I915_RESET_HANDOFF, &error->flags))
1852                 return;
1853
1854         /* Clear any previous failed attempts at recovery. Time to try again. */
1855         if (!i915_gem_unset_wedged(dev_priv))
1856                 goto wakeup;
1857
1858         error->reset_count++;
1859
1860         pr_notice("drm/i915: Resetting chip after gpu hang\n");
1861         disable_irq(dev_priv->drm.irq);
1862         ret = i915_gem_reset_prepare(dev_priv);
1863         if (ret) {
1864                 DRM_ERROR("GPU recovery failed\n");
1865                 intel_gpu_reset(dev_priv, ALL_ENGINES);
1866                 goto error;
1867         }
1868
1869         ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
1870         if (ret) {
1871                 if (ret != -ENODEV)
1872                         DRM_ERROR("Failed to reset chip: %i\n", ret);
1873                 else
1874                         DRM_DEBUG_DRIVER("GPU reset disabled\n");
1875                 goto error;
1876         }
1877
1878         i915_gem_reset(dev_priv);
1879         intel_overlay_reset(dev_priv);
1880
1881         /* Ok, now get things going again... */
1882
1883         /*
1884          * Everything depends on having the GTT running, so we need to start
1885          * there.  Fortunately we don't need to do this unless we reset the
1886          * chip at a PCI level.
1887          *
1888          * Next we need to restore the context, but we don't use those
1889          * yet either...
1890          *
1891          * Ring buffer needs to be re-initialized in the KMS case, or if X
1892          * was running at the time of the reset (i.e. we weren't VT
1893          * switched away).
1894          */
1895         ret = i915_gem_init_hw(dev_priv);
1896         if (ret) {
1897                 DRM_ERROR("Failed hw init on reset %d\n", ret);
1898                 goto error;
1899         }
1900
1901         i915_queue_hangcheck(dev_priv);
1902
1903 finish:
1904         i915_gem_reset_finish(dev_priv);
1905         enable_irq(dev_priv->drm.irq);
1906
1907 wakeup:
1908         clear_bit(I915_RESET_HANDOFF, &error->flags);
1909         wake_up_bit(&error->flags, I915_RESET_HANDOFF);
1910         return;
1911
1912 error:
1913         i915_gem_set_wedged(dev_priv);
1914         goto finish;
1915 }
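/*
 * A minimal sketch of the calling convention asserted above (the actual
 * caller is the error handling/hangcheck path elsewhere in the driver):
 * struct_mutex held, I915_RESET_BACKOFF set, and I915_RESET_HANDOFF used
 * to hand control to this function:
 *
 *	mutex_lock(&dev_priv->drm.struct_mutex);
 *	i915_reset(dev_priv);
 *	mutex_unlock(&dev_priv->drm.struct_mutex);
 */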
1916
1917 static int i915_pm_suspend(struct device *kdev)
1918 {
1919         struct pci_dev *pdev = to_pci_dev(kdev);
1920         struct drm_device *dev = pci_get_drvdata(pdev);
1921
1922         if (!dev) {
1923                 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1924                 return -ENODEV;
1925         }
1926
1927         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1928                 return 0;
1929
1930         return i915_drm_suspend(dev);
1931 }
1932
1933 static int i915_pm_suspend_late(struct device *kdev)
1934 {
1935         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1936
1937         /*
1938          * We have a suspend ordering issue with the snd-hda driver also
1939          * requiring our device to be powered up. Due to the lack of a
1940          * parent/child relationship we currently solve this with a late
1941          * suspend hook.
1942          *
1943          * FIXME: This should be solved with a special hdmi sink device or
1944          * similar so that power domains can be employed.
1945          */
1946         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1947                 return 0;
1948
1949         return i915_drm_suspend_late(dev, false);
1950 }
1951
1952 static int i915_pm_poweroff_late(struct device *kdev)
1953 {
1954         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1955
1956         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1957                 return 0;
1958
1959         return i915_drm_suspend_late(dev, true);
1960 }
1961
1962 static int i915_pm_resume_early(struct device *kdev)
1963 {
1964         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1965
1966         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1967                 return 0;
1968
1969         return i915_drm_resume_early(dev);
1970 }
1971
1972 static int i915_pm_resume(struct device *kdev)
1973 {
1974         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1975
1976         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1977                 return 0;
1978
1979         return i915_drm_resume(dev);
1980 }
1981
1982 /* freeze: before creating the hibernation_image */
1983 static int i915_pm_freeze(struct device *kdev)
1984 {
1985         int ret;
1986
1987         ret = i915_pm_suspend(kdev);
1988         if (ret)
1989                 return ret;
1990
1991         ret = i915_gem_freeze(kdev_to_i915(kdev));
1992         if (ret)
1993                 return ret;
1994
1995         return 0;
1996 }
1997
1998 static int i915_pm_freeze_late(struct device *kdev)
1999 {
2000         int ret;
2001
2002         ret = i915_pm_suspend_late(kdev);
2003         if (ret)
2004                 return ret;
2005
2006         ret = i915_gem_freeze_late(kdev_to_i915(kdev));
2007         if (ret)
2008                 return ret;
2009
2010         return 0;
2011 }
2012
2013 /* thaw: called after creating the hibernation image, but before turning off. */
2014 static int i915_pm_thaw_early(struct device *kdev)
2015 {
2016         return i915_pm_resume_early(kdev);
2017 }
2018
2019 static int i915_pm_thaw(struct device *kdev)
2020 {
2021         return i915_pm_resume(kdev);
2022 }
2023
2024 /* restore: called after loading the hibernation image. */
2025 static int i915_pm_restore_early(struct device *kdev)
2026 {
2027         return i915_pm_resume_early(kdev);
2028 }
2029
2030 static int i915_pm_restore(struct device *kdev)
2031 {
2032         return i915_pm_resume(kdev);
2033 }
2034
2035 /*
2036  * Save all Gunit registers that may be lost after a D3 and a subsequent
2037  * S0i[R123] transition. The list of registers needing a save/restore is
2038  * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2039  * registers in the following way:
2040  * - Driver: saved/restored by the driver
2041  * - Punit : saved/restored by the Punit firmware
2042  * - No, w/o marking: no need to save/restore, since the register is R/O or
2043  *                    used internally by the HW in a way that doesn't depend
2044  *                    on keeping the content across a suspend/resume.
2045  * - Debug : used for debugging
2046  *
2047  * We save/restore all registers marked with 'Driver', with the following
2048  * exceptions:
2049  * - Registers out of use, including also registers marked with 'Debug'.
2050  *   These have no effect on the driver's operation, so we don't save/restore
2051  *   them to reduce the overhead.
2052  * - Registers that are fully setup by an initialization function called from
2053  *   the resume path. For example many clock gating and RPS/RC6 registers.
2054  * - Registers that provide the right functionality with their reset defaults.
2055  *
2056  * TODO: Except for registers that, based on the above 3 criteria, can be
2057  * safely ignored, we save/restore all others, practically treating the HW
2058  * context as a black box for the driver. Further investigation is needed to
2059  * reduce the saved/restored registers even further, following the same criteria.
2060  */
2061 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2062 {
2063         struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2064         int i;
2065
2066         /* GAM 0x4000-0x4770 */
2067         s->wr_watermark         = I915_READ(GEN7_WR_WATERMARK);
2068         s->gfx_prio_ctrl        = I915_READ(GEN7_GFX_PRIO_CTRL);
2069         s->arb_mode             = I915_READ(ARB_MODE);
2070         s->gfx_pend_tlb0        = I915_READ(GEN7_GFX_PEND_TLB0);
2071         s->gfx_pend_tlb1        = I915_READ(GEN7_GFX_PEND_TLB1);
2072
2073         for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2074                 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
2075
2076         s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
2077         s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
2078
2079         s->render_hwsp          = I915_READ(RENDER_HWS_PGA_GEN7);
2080         s->ecochk               = I915_READ(GAM_ECOCHK);
2081         s->bsd_hwsp             = I915_READ(BSD_HWS_PGA_GEN7);
2082         s->blt_hwsp             = I915_READ(BLT_HWS_PGA_GEN7);
2083
2084         s->tlb_rd_addr          = I915_READ(GEN7_TLB_RD_ADDR);
2085
2086         /* MBC 0x9024-0x91D0, 0x8500 */
2087         s->g3dctl               = I915_READ(VLV_G3DCTL);
2088         s->gsckgctl             = I915_READ(VLV_GSCKGCTL);
2089         s->mbctl                = I915_READ(GEN6_MBCTL);
2090
2091         /* GCP 0x9400-0x9424, 0x8100-0x810C */
2092         s->ucgctl1              = I915_READ(GEN6_UCGCTL1);
2093         s->ucgctl3              = I915_READ(GEN6_UCGCTL3);
2094         s->rcgctl1              = I915_READ(GEN6_RCGCTL1);
2095         s->rcgctl2              = I915_READ(GEN6_RCGCTL2);
2096         s->rstctl               = I915_READ(GEN6_RSTCTL);
2097         s->misccpctl            = I915_READ(GEN7_MISCCPCTL);
2098
2099         /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2100         s->gfxpause             = I915_READ(GEN6_GFXPAUSE);
2101         s->rpdeuhwtc            = I915_READ(GEN6_RPDEUHWTC);
2102         s->rpdeuc               = I915_READ(GEN6_RPDEUC);
2103         s->ecobus               = I915_READ(ECOBUS);
2104         s->pwrdwnupctl          = I915_READ(VLV_PWRDWNUPCTL);
2105         s->rp_down_timeout      = I915_READ(GEN6_RP_DOWN_TIMEOUT);
2106         s->rp_deucsw            = I915_READ(GEN6_RPDEUCSW);
2107         s->rcubmabdtmr          = I915_READ(GEN6_RCUBMABDTMR);
2108         s->rcedata              = I915_READ(VLV_RCEDATA);
2109         s->spare2gh             = I915_READ(VLV_SPAREG2H);
2110
2111         /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2112         s->gt_imr               = I915_READ(GTIMR);
2113         s->gt_ier               = I915_READ(GTIER);
2114         s->pm_imr               = I915_READ(GEN6_PMIMR);
2115         s->pm_ier               = I915_READ(GEN6_PMIER);
2116
2117         for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2118                 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
2119
2120         /* GT SA CZ domain, 0x100000-0x138124 */
2121         s->tilectl              = I915_READ(TILECTL);
2122         s->gt_fifoctl           = I915_READ(GTFIFOCTL);
2123         s->gtlc_wake_ctrl       = I915_READ(VLV_GTLC_WAKE_CTRL);
2124         s->gtlc_survive         = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2125         s->pmwgicz              = I915_READ(VLV_PMWGICZ);
2126
2127         /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2128         s->gu_ctl0              = I915_READ(VLV_GU_CTL0);
2129         s->gu_ctl1              = I915_READ(VLV_GU_CTL1);
2130         s->pcbr                 = I915_READ(VLV_PCBR);
2131         s->clock_gate_dis2      = I915_READ(VLV_GUNIT_CLOCK_GATE2);
2132
2133         /*
2134          * Not saving any of:
2135          * DFT,         0x9800-0x9EC0
2136          * SARB,        0xB000-0xB1FC
2137          * GAC,         0x5208-0x524C, 0x14000-0x14C000
2138          * PCI CFG
2139          */
2140 }
2141
2142 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2143 {
2144         struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2145         u32 val;
2146         int i;
2147
2148         /* GAM 0x4000-0x4770 */
2149         I915_WRITE(GEN7_WR_WATERMARK,   s->wr_watermark);
2150         I915_WRITE(GEN7_GFX_PRIO_CTRL,  s->gfx_prio_ctrl);
2151         I915_WRITE(ARB_MODE,            s->arb_mode | (0xffff << 16));
2152         I915_WRITE(GEN7_GFX_PEND_TLB0,  s->gfx_pend_tlb0);
2153         I915_WRITE(GEN7_GFX_PEND_TLB1,  s->gfx_pend_tlb1);
2154
2155         for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2156                 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
2157
2158         I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
2159         I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
2160
2161         I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
2162         I915_WRITE(GAM_ECOCHK,          s->ecochk);
2163         I915_WRITE(BSD_HWS_PGA_GEN7,    s->bsd_hwsp);
2164         I915_WRITE(BLT_HWS_PGA_GEN7,    s->blt_hwsp);
2165
2166         I915_WRITE(GEN7_TLB_RD_ADDR,    s->tlb_rd_addr);
2167
2168         /* MBC 0x9024-0x91D0, 0x8500 */
2169         I915_WRITE(VLV_G3DCTL,          s->g3dctl);
2170         I915_WRITE(VLV_GSCKGCTL,        s->gsckgctl);
2171         I915_WRITE(GEN6_MBCTL,          s->mbctl);
2172
2173         /* GCP 0x9400-0x9424, 0x8100-0x810C */
2174         I915_WRITE(GEN6_UCGCTL1,        s->ucgctl1);
2175         I915_WRITE(GEN6_UCGCTL3,        s->ucgctl3);
2176         I915_WRITE(GEN6_RCGCTL1,        s->rcgctl1);
2177         I915_WRITE(GEN6_RCGCTL2,        s->rcgctl2);
2178         I915_WRITE(GEN6_RSTCTL,         s->rstctl);
2179         I915_WRITE(GEN7_MISCCPCTL,      s->misccpctl);
2180
2181         /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2182         I915_WRITE(GEN6_GFXPAUSE,       s->gfxpause);
2183         I915_WRITE(GEN6_RPDEUHWTC,      s->rpdeuhwtc);
2184         I915_WRITE(GEN6_RPDEUC,         s->rpdeuc);
2185         I915_WRITE(ECOBUS,              s->ecobus);
2186         I915_WRITE(VLV_PWRDWNUPCTL,     s->pwrdwnupctl);
2187         I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
2188         I915_WRITE(GEN6_RPDEUCSW,       s->rp_deucsw);
2189         I915_WRITE(GEN6_RCUBMABDTMR,    s->rcubmabdtmr);
2190         I915_WRITE(VLV_RCEDATA,         s->rcedata);
2191         I915_WRITE(VLV_SPAREG2H,        s->spare2gh);
2192
2193         /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2194         I915_WRITE(GTIMR,               s->gt_imr);
2195         I915_WRITE(GTIER,               s->gt_ier);
2196         I915_WRITE(GEN6_PMIMR,          s->pm_imr);
2197         I915_WRITE(GEN6_PMIER,          s->pm_ier);
2198
2199         for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2200                 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
2201
2202         /* GT SA CZ domain, 0x100000-0x138124 */
2203         I915_WRITE(TILECTL,                     s->tilectl);
2204         I915_WRITE(GTFIFOCTL,                   s->gt_fifoctl);
2205         /*
2206          * Preserve the GT allow wake and GFX force clock bits; they should
2207          * not be restored, as they are used to control the s0ix suspend/resume
2208          * sequence by the caller.
2209          */
2210         val = I915_READ(VLV_GTLC_WAKE_CTRL);
2211         val &= VLV_GTLC_ALLOWWAKEREQ;
2212         val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2213         I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2214
2215         val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2216         val &= VLV_GFX_CLK_FORCE_ON_BIT;
2217         val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2218         I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2219
2220         I915_WRITE(VLV_PMWGICZ,                 s->pmwgicz);
2221
2222         /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2223         I915_WRITE(VLV_GU_CTL0,                 s->gu_ctl0);
2224         I915_WRITE(VLV_GU_CTL1,                 s->gu_ctl1);
2225         I915_WRITE(VLV_PCBR,                    s->pcbr);
2226         I915_WRITE(VLV_GUNIT_CLOCK_GATE2,       s->clock_gate_dis2);
2227 }
2228
2229 static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
2230                                   u32 mask, u32 val)
2231 {
2232         /* The HW does not like us polling for PW_STATUS frequently, so
2233          * use the sleeping loop rather than risk the busy spin within
2234          * intel_wait_for_register().
2235          *
2236          * Transitioning between RC6 states should be at most 2ms (see
2237          * valleyview_enable_rps) so use a 3ms timeout.
2238          */
2239         return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
2240                         3);
2241 }
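/*
 * Example (a sketch mirroring vlv_wait_for_gt_wells() below): waiting for
 * both the render and media power wells to report powered-off:
 *
 *	u32 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK |
 *		   VLV_GTLC_PW_RENDER_STATUS_MASK;
 *
 *	if (vlv_wait_for_pw_status(dev_priv, mask, 0))
 *		DRM_ERROR("timeout waiting for GT wells to go off\n");
 */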
2242
2243 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
2244 {
2245         u32 val;
2246         int err;
2247
2248         val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2249         val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
2250         if (force_on)
2251                 val |= VLV_GFX_CLK_FORCE_ON_BIT;
2252         I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2253
2254         if (!force_on)
2255                 return 0;
2256
2257         err = intel_wait_for_register(dev_priv,
2258                                       VLV_GTLC_SURVIVABILITY_REG,
2259                                       VLV_GFX_CLK_STATUS_BIT,
2260                                       VLV_GFX_CLK_STATUS_BIT,
2261                                       20);
2262         if (err)
2263                 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2264                           I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2265
2266         return err;
2267 }
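/*
 * Typical usage (see vlv_suspend_complete() below) brackets the Gunit
 * state save with the clock forced on and releases the force afterwards:
 *
 *	err = vlv_force_gfx_clock(dev_priv, true);
 *	...
 *	vlv_save_gunit_s0ix_state(dev_priv);
 *	err = vlv_force_gfx_clock(dev_priv, false);
 */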
2268
2269 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
2270 {
2271         u32 mask;
2272         u32 val;
2273         int err;
2274
2275         val = I915_READ(VLV_GTLC_WAKE_CTRL);
2276         val &= ~VLV_GTLC_ALLOWWAKEREQ;
2277         if (allow)
2278                 val |= VLV_GTLC_ALLOWWAKEREQ;
2279         I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2280         POSTING_READ(VLV_GTLC_WAKE_CTRL);
2281
2282         mask = VLV_GTLC_ALLOWWAKEACK;
2283         val = allow ? mask : 0;
2284
2285         err = vlv_wait_for_pw_status(dev_priv, mask, val);
2286         if (err)
2287                 DRM_ERROR("timeout disabling GT waking\n");
2288
2289         return err;
2290 }
2291
2292 static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
2293                                   bool wait_for_on)
2294 {
2295         u32 mask;
2296         u32 val;
2297
2298         mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2299         val = wait_for_on ? mask : 0;
2300
2301         /*
2302          * RC6 transitioning can be delayed up to 2 msec (see
2303          * valleyview_enable_rps), use 3 msec for safety.
2304          */
2305         if (vlv_wait_for_pw_status(dev_priv, mask, val))
2306                 DRM_ERROR("timeout waiting for GT wells to go %s\n",
2307                           onoff(wait_for_on));
2308 }
2309
2310 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
2311 {
2312         if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
2313                 return;
2314
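        /*
         * ALLOWWAKEERR appears to be a sticky status bit; writing it back
         * acknowledges and clears the error (assuming write-one-to-clear
         * semantics, as this sequence implies).
         */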
2315         DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
2316         I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
2317 }
2318
2319 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
2320 {
2321         u32 mask;
2322         int err;
2323
2324         /*
2325          * Bspec defines the following GT well-on flags as debug only, so
2326          * don't treat them as hard failures.
2327          */
2328         vlv_wait_for_gt_wells(dev_priv, false);
2329
2330         mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2331         WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2332
2333         vlv_check_no_gt_access(dev_priv);
2334
2335         err = vlv_force_gfx_clock(dev_priv, true);
2336         if (err)
2337                 goto err1;
2338
2339         err = vlv_allow_gt_wake(dev_priv, false);
2340         if (err)
2341                 goto err2;
2342
2343         if (!IS_CHERRYVIEW(dev_priv))
2344                 vlv_save_gunit_s0ix_state(dev_priv);
2345
2346         err = vlv_force_gfx_clock(dev_priv, false);
2347         if (err)
2348                 goto err2;
2349
2350         return 0;
2351
2352 err2:
2353         /* For safety, always re-enable waking and disable gfx clock forcing */
2354         vlv_allow_gt_wake(dev_priv, true);
2355 err1:
2356         vlv_force_gfx_clock(dev_priv, false);
2357
2358         return err;
2359 }
2360
2361 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2362                                 bool rpm_resume)
2363 {
2364         int err;
2365         int ret;
2366
2367         /*
2368          * If any of the steps fail, just try to continue; that's the best we
2369          * can do at this point. Return the first error code (which will also
2370          * leave RPM permanently disabled).
2371          */
2372         ret = vlv_force_gfx_clock(dev_priv, true);
2373
2374         if (!IS_CHERRYVIEW(dev_priv))
2375                 vlv_restore_gunit_s0ix_state(dev_priv);
2376
2377         err = vlv_allow_gt_wake(dev_priv, true);
2378         if (!ret)
2379                 ret = err;
2380
2381         err = vlv_force_gfx_clock(dev_priv, false);
2382         if (!ret)
2383                 ret = err;
2384
2385         vlv_check_no_gt_access(dev_priv);
2386
2387         if (rpm_resume)
2388                 intel_init_clock_gating(dev_priv);
2389
2390         return ret;
2391 }
2392
2393 static int intel_runtime_suspend(struct device *kdev)
2394 {
2395         struct pci_dev *pdev = to_pci_dev(kdev);
2396         struct drm_device *dev = pci_get_drvdata(pdev);
2397         struct drm_i915_private *dev_priv = to_i915(dev);
2398         int ret;
2399
2400         if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
2401                 return -ENODEV;
2402
2403         if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
2404                 return -ENODEV;
2405
2406         DRM_DEBUG_KMS("Suspending device\n");
2407
2408         disable_rpm_wakeref_asserts(dev_priv);
2409
2410         /*
2411          * We are safe here against re-faults, since the fault handler takes
2412          * an RPM reference.
2413          */
2414         i915_gem_runtime_suspend(dev_priv);
2415
2416         intel_guc_suspend(dev_priv);
2417
2418         intel_runtime_pm_disable_interrupts(dev_priv);
2419
2420         ret = 0;
2421         if (IS_GEN9_LP(dev_priv)) {
2422                 bxt_display_core_uninit(dev_priv);
2423                 bxt_enable_dc9(dev_priv);
2424         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2425                 hsw_enable_pc8(dev_priv);
2426         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2427                 ret = vlv_suspend_complete(dev_priv);
2428         }
2429
2430         if (ret) {
2431                 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
2432                 intel_runtime_pm_enable_interrupts(dev_priv);
2433
2434                 enable_rpm_wakeref_asserts(dev_priv);
2435
2436                 return ret;
2437         }
2438
2439         intel_uncore_suspend(dev_priv);
2440
2441         enable_rpm_wakeref_asserts(dev_priv);
2442         WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
2443
2444         if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
2445                 DRM_ERROR("Unclaimed access detected prior to suspending\n");
2446
2447         dev_priv->pm.suspended = true;
2448
2449         /*
2450          * FIXME: We really should find a document that references the arguments
2451          * used below!
2452          */
2453         if (IS_BROADWELL(dev_priv)) {
2454                 /*
2455                  * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
2456                  * being detected, and the call we do at intel_runtime_resume()
2457                  * won't be able to restore them. Since PCI_D3hot matches the
2458                  * actual specification and appears to be working, use it.
2459                  */
2460                 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
2461         } else {
2462                 /*
2463                  * current versions of firmware which depend on this opregion
2464                  * notification have repurposed the D1 definition to mean
2465                  * "runtime suspended" vs. what you would normally expect (D3)
2466                  * to distinguish it from notifications that might be sent via
2467                  * the suspend path.
2468                  */
2469                 intel_opregion_notify_adapter(dev_priv, PCI_D1);
2470         }
2471
2472         assert_forcewakes_inactive(dev_priv);
2473
2474         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2475                 intel_hpd_poll_init(dev_priv);
2476
2477         DRM_DEBUG_KMS("Device suspended\n");
2478         return 0;
2479 }
2480
2481 static int intel_runtime_resume(struct device *kdev)
2482 {
2483         struct pci_dev *pdev = to_pci_dev(kdev);
2484         struct drm_device *dev = pci_get_drvdata(pdev);
2485         struct drm_i915_private *dev_priv = to_i915(dev);
2486         int ret = 0;
2487
2488         if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
2489                 return -ENODEV;
2490
2491         DRM_DEBUG_KMS("Resuming device\n");
2492
2493         WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
2494         disable_rpm_wakeref_asserts(dev_priv);
2495
2496         intel_opregion_notify_adapter(dev_priv, PCI_D0);
2497         dev_priv->pm.suspended = false;
2498         if (intel_uncore_unclaimed_mmio(dev_priv))
2499                 DRM_DEBUG_DRIVER("Unclaimed access during suspend, BIOS?\n");
2500
2501         intel_guc_resume(dev_priv);
2502
2503         if (IS_GEN9_LP(dev_priv)) {
2504                 bxt_disable_dc9(dev_priv);
2505                 bxt_display_core_init(dev_priv, true);
2506                 if (dev_priv->csr.dmc_payload &&
2507                     (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2508                         gen9_enable_dc5(dev_priv);
2509         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2510                 hsw_disable_pc8(dev_priv);
2511         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2512                 ret = vlv_resume_prepare(dev_priv, true);
2513         }
2514
2515         /*
2516          * No point in rolling things back in case of an error, as the best
2517          * we can do is to hope that things will still work (and disable RPM).
2518          */
2519         i915_gem_init_swizzling(dev_priv);
2520         i915_gem_restore_fences(dev_priv);
2521
2522         intel_runtime_pm_enable_interrupts(dev_priv);
2523
2524         /*
2525          * On VLV/CHV display interrupts are part of the display
2526          * power well, so hpd is reinitialized from there. For
2527          * everyone else do it here.
2528          */
2529         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2530                 intel_hpd_init(dev_priv);
2531
2532         enable_rpm_wakeref_asserts(dev_priv);
2533
2534         if (ret)
2535                 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
2536         else
2537                 DRM_DEBUG_KMS("Device resumed\n");
2538
2539         return ret;
2540 }
2541
2542 const struct dev_pm_ops i915_pm_ops = {
2543         /*
2544          * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
2545          * PMSG_RESUME]
2546          */
2547         .suspend = i915_pm_suspend,
2548         .suspend_late = i915_pm_suspend_late,
2549         .resume_early = i915_pm_resume_early,
2550         .resume = i915_pm_resume,
2551
2552         /*
2553          * S4 event handlers
2554          * @freeze, @freeze_late    : called (1) before creating the
2555          *                            hibernation image [PMSG_FREEZE] and
2556          *                            (2) after rebooting, before restoring
2557          *                            the image [PMSG_QUIESCE]
2558          * @thaw, @thaw_early       : called (1) after creating the hibernation
2559          *                            image, before writing it [PMSG_THAW]
2560          *                            and (2) after failing to create or
2561          *                            restore the image [PMSG_RECOVER]
2562          * @poweroff, @poweroff_late: called after writing the hibernation
2563          *                            image, before rebooting [PMSG_HIBERNATE]
2564          * @restore, @restore_early : called after rebooting and restoring the
2565          *                            hibernation image [PMSG_RESTORE]
2566          */
2567         .freeze = i915_pm_freeze,
2568         .freeze_late = i915_pm_freeze_late,
2569         .thaw_early = i915_pm_thaw_early,
2570         .thaw = i915_pm_thaw,
2571         .poweroff = i915_pm_suspend,
2572         .poweroff_late = i915_pm_poweroff_late,
2573         .restore_early = i915_pm_restore_early,
2574         .restore = i915_pm_restore,
2575
2576         /* S0ix (via runtime suspend) event handlers */
2577         .runtime_suspend = intel_runtime_suspend,
2578         .runtime_resume = intel_runtime_resume,
2579 };
2580
2581 static const struct vm_operations_struct i915_gem_vm_ops = {
2582         .fault = i915_gem_fault,
2583         .open = drm_gem_vm_open,
2584         .close = drm_gem_vm_close,
2585 };
2586
2587 static const struct file_operations i915_driver_fops = {
2588         .owner = THIS_MODULE,
2589         .open = drm_open,
2590         .release = drm_release,
2591         .unlocked_ioctl = drm_ioctl,
2592         .mmap = drm_gem_mmap,
2593         .poll = drm_poll,
2594         .read = drm_read,
2595         .compat_ioctl = i915_compat_ioctl,
2596         .llseek = noop_llseek,
2597 };
2598
2599 static int
2600 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
2601                           struct drm_file *file)
2602 {
2603         return -ENODEV;
2604 }
2605
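/*
 * Legacy (UMS-era) ioctls are wired to drm_noop, which just returns
 * success, while the long-removed pin/unpin interface is rejected with
 * -ENODEV via i915_gem_reject_pin_ioctl() above.
 */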
2606 static const struct drm_ioctl_desc i915_ioctls[] = {
2607         DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2608         DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2609         DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2610         DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2611         DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2612         DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
2613         DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
2614         DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2615         DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2616         DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2617         DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2618         DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2619         DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2620         DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2621         DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
2622         DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2623         DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2624         DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2625         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
2626         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
2627         DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2628         DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2629         DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2630         DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
2631         DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
2632         DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2633         DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2634         DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2635         DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
2636         DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
2637         DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
2638         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
2639         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
2640         DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
2641         DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
2642         DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
2643         DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
2644         DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
2645         DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
2646         DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
2647         DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2648         DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2649         DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
2650         DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
2651         DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2652         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
2653         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
2654         DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
2655         DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
2656         DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
2657         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
2658         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
2659         DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
2660 };
2661
2662 static struct drm_driver driver = {
2663         /* Don't use MTRRs here; the Xserver or userspace app should
2664          * deal with them for Intel hardware.
2665          */
2666         .driver_features =
2667             DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
2668             DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
2669         .release = i915_driver_release,
2670         .open = i915_driver_open,
2671         .lastclose = i915_driver_lastclose,
2672         .postclose = i915_driver_postclose,
2673         .set_busid = drm_pci_set_busid,
2674
2675         .gem_close_object = i915_gem_close_object,
2676         .gem_free_object_unlocked = i915_gem_free_object,
2677         .gem_vm_ops = &i915_gem_vm_ops,
2678
2679         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
2680         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
2681         .gem_prime_export = i915_gem_prime_export,
2682         .gem_prime_import = i915_gem_prime_import,
2683
2684         .dumb_create = i915_gem_dumb_create,
2685         .dumb_map_offset = i915_gem_mmap_gtt,
2686         .dumb_destroy = drm_gem_dumb_destroy,
2687         .ioctls = i915_ioctls,
2688         .num_ioctls = ARRAY_SIZE(i915_ioctls),
2689         .fops = &i915_driver_fops,
2690         .name = DRIVER_NAME,
2691         .desc = DRIVER_DESC,
2692         .date = DRIVER_DATE,
2693         .major = DRIVER_MAJOR,
2694         .minor = DRIVER_MINOR,
2695         .patchlevel = DRIVER_PATCHLEVEL,
2696 };
2697
2698 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2699 #include "selftests/mock_drm.c"
2700 #endif