Linux-libre 5.3.12-gnu (librecmc/linux-libre.git)
drivers/gpu/drm/i915/gt/intel_reset.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008-2018 Intel Corporation
5  */
6
7 #include <linux/sched/mm.h>
8 #include <linux/stop_machine.h>
9
10 #include "display/intel_overlay.h"
11
12 #include "gem/i915_gem_context.h"
13
14 #include "i915_drv.h"
15 #include "i915_gpu_error.h"
16 #include "i915_irq.h"
17 #include "intel_engine_pm.h"
18 #include "intel_gt_pm.h"
19 #include "intel_reset.h"
20
21 #include "intel_guc.h"
22
23 #define RESET_MAX_RETRIES 3
24
25 /* XXX How to handle concurrent GGTT updates using tiling registers? */
26 #define RESET_UNDER_STOP_MACHINE 0
27
28 static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
29 {
30         intel_uncore_rmw(uncore, reg, 0, set);
31 }
32
33 static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
34 {
35         intel_uncore_rmw(uncore, reg, clr, 0);
36 }
37
38 static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
39 {
40         intel_uncore_rmw_fw(uncore, reg, 0, set);
41 }
42
43 static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
44 {
45         intel_uncore_rmw_fw(uncore, reg, clr, 0);
46 }
47
48 static void engine_skip_context(struct i915_request *rq)
49 {
50         struct intel_engine_cs *engine = rq->engine;
51         struct i915_gem_context *hung_ctx = rq->gem_context;
52
53         lockdep_assert_held(&engine->active.lock);
54
55         if (!i915_request_is_active(rq))
56                 return;
57
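        /*
         * Walk the remainder of the engine's active request list and cancel
         * (with -EIO) every later request submitted by the hung context.
         */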
58         list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
59                 if (rq->gem_context == hung_ctx)
60                         i915_request_skip(rq, -EIO);
61 }
62
63 static void client_mark_guilty(struct drm_i915_file_private *file_priv,
64                                const struct i915_gem_context *ctx)
65 {
66         unsigned int score;
67         unsigned long prev_hang;
68
69         if (i915_gem_context_is_banned(ctx))
70                 score = I915_CLIENT_SCORE_CONTEXT_BAN;
71         else
72                 score = 0;
73
74         prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
75         if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
76                 score += I915_CLIENT_SCORE_HANG_FAST;
77
78         if (score) {
79                 atomic_add(score, &file_priv->ban_score);
80
81                 DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
82                                  ctx->name, score,
83                                  atomic_read(&file_priv->ban_score));
84         }
85 }
86
87 static bool context_mark_guilty(struct i915_gem_context *ctx)
88 {
89         unsigned long prev_hang;
90         bool banned;
91         int i;
92
93         atomic_inc(&ctx->guilty_count);
94
95         /* Cool contexts are too cool to be banned! (Used for reset testing.) */
96         if (!i915_gem_context_is_bannable(ctx))
97                 return false;
98
99         /* Record the timestamp for the last N hangs */
100         prev_hang = ctx->hang_timestamp[0];
101         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
102                 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
103         ctx->hang_timestamp[i] = jiffies;
104
105         /* If we have hung N+1 times in rapid succession, we ban the context! */
106         banned = !i915_gem_context_is_recoverable(ctx);
107         if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
108                 banned = true;
109         if (banned) {
110                 DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
111                                  ctx->name, atomic_read(&ctx->guilty_count));
112                 i915_gem_context_set_banned(ctx);
113         }
114
115         if (!IS_ERR_OR_NULL(ctx->file_priv))
116                 client_mark_guilty(ctx->file_priv, ctx);
117
118         return banned;
119 }
120
121 static void context_mark_innocent(struct i915_gem_context *ctx)
122 {
123         atomic_inc(&ctx->active_count);
124 }
125
126 void i915_reset_request(struct i915_request *rq, bool guilty)
127 {
128         GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
129                   rq->engine->name,
130                   rq->fence.context,
131                   rq->fence.seqno,
132                   yesno(guilty));
133
134         lockdep_assert_held(&rq->engine->active.lock);
135         GEM_BUG_ON(i915_request_completed(rq));
136
137         if (guilty) {
138                 i915_request_skip(rq, -EIO);
139                 if (context_mark_guilty(rq->gem_context))
140                         engine_skip_context(rq);
141         } else {
142                 dma_fence_set_error(&rq->fence, -EAGAIN);
143                 context_mark_innocent(rq->gem_context);
144         }
145 }
146
147 static void gen3_stop_engine(struct intel_engine_cs *engine)
148 {
149         struct intel_uncore *uncore = engine->uncore;
150         const u32 base = engine->mmio_base;
151
152         GEM_TRACE("%s\n", engine->name);
153
154         if (intel_engine_stop_cs(engine))
155                 GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
156
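        /*
         * Step HEAD up to TAIL so the ring reads as empty, then clear both
         * pointers before disabling the ring below.
         */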
157         intel_uncore_write_fw(uncore,
158                               RING_HEAD(base),
159                               intel_uncore_read_fw(uncore, RING_TAIL(base)));
160         intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
161
162         intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
163         intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
164         intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
165
166         /* The ring must be empty before it is disabled */
167         intel_uncore_write_fw(uncore, RING_CTL(base), 0);
168
169         /* Check acts as a post */
170         if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
171                 GEM_TRACE("%s: ring head [%x] not parked\n",
172                           engine->name,
173                           intel_uncore_read_fw(uncore, RING_HEAD(base)));
174 }
175
176 static void i915_stop_engines(struct drm_i915_private *i915,
177                               intel_engine_mask_t engine_mask)
178 {
179         struct intel_engine_cs *engine;
180         intel_engine_mask_t tmp;
181
182         if (INTEL_GEN(i915) < 3)
183                 return;
184
185         for_each_engine_masked(engine, i915, engine_mask, tmp)
186                 gen3_stop_engine(engine);
187 }
188
189 static bool i915_in_reset(struct pci_dev *pdev)
190 {
191         u8 gdrst;
192
193         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
194         return gdrst & GRDOM_RESET_STATUS;
195 }
196
197 static int i915_do_reset(struct drm_i915_private *i915,
198                          intel_engine_mask_t engine_mask,
199                          unsigned int retry)
200 {
201         struct pci_dev *pdev = i915->drm.pdev;
202         int err;
203
204         /* Assert reset for at least 20 usec (we hold it for 50), and wait for acknowledgement. */
205         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
206         udelay(50);
207         err = wait_for_atomic(i915_in_reset(pdev), 50);
208
209         /* Clear the reset request. */
210         pci_write_config_byte(pdev, I915_GDRST, 0);
211         udelay(50);
212         if (!err)
213                 err = wait_for_atomic(!i915_in_reset(pdev), 50);
214
215         return err;
216 }
217
218 static bool g4x_reset_complete(struct pci_dev *pdev)
219 {
220         u8 gdrst;
221
222         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
223         return (gdrst & GRDOM_RESET_ENABLE) == 0;
224 }
225
226 static int g33_do_reset(struct drm_i915_private *i915,
227                         intel_engine_mask_t engine_mask,
228                         unsigned int retry)
229 {
230         struct pci_dev *pdev = i915->drm.pdev;
231
232         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
233         return wait_for_atomic(g4x_reset_complete(pdev), 50);
234 }
235
236 static int g4x_do_reset(struct drm_i915_private *i915,
237                         intel_engine_mask_t engine_mask,
238                         unsigned int retry)
239 {
240         struct pci_dev *pdev = i915->drm.pdev;
241         struct intel_uncore *uncore = &i915->uncore;
242         int ret;
243
244         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
245         rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
246         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
247
248         pci_write_config_byte(pdev, I915_GDRST,
249                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
250         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
251         if (ret) {
252                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
253                 goto out;
254         }
255
256         pci_write_config_byte(pdev, I915_GDRST,
257                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
258         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
259         if (ret) {
260                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
261                 goto out;
262         }
263
264 out:
265         pci_write_config_byte(pdev, I915_GDRST, 0);
266
267         rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
268         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
269
270         return ret;
271 }
272
273 static int ironlake_do_reset(struct drm_i915_private *i915,
274                              intel_engine_mask_t engine_mask,
275                              unsigned int retry)
276 {
277         struct intel_uncore *uncore = &i915->uncore;
278         int ret;
279
280         intel_uncore_write_fw(uncore, ILK_GDSR,
281                               ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
282         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
283                                            ILK_GRDOM_RESET_ENABLE, 0,
284                                            5000, 0,
285                                            NULL);
286         if (ret) {
287                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
288                 goto out;
289         }
290
291         intel_uncore_write_fw(uncore, ILK_GDSR,
292                               ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
293         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
294                                            ILK_GRDOM_RESET_ENABLE, 0,
295                                            5000, 0,
296                                            NULL);
297         if (ret) {
298                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
299                 goto out;
300         }
301
302 out:
303         intel_uncore_write_fw(uncore, ILK_GDSR, 0);
304         intel_uncore_posting_read_fw(uncore, ILK_GDSR);
305         return ret;
306 }
307
308 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
309 static int gen6_hw_domain_reset(struct drm_i915_private *i915,
310                                 u32 hw_domain_mask)
311 {
312         struct intel_uncore *uncore = &i915->uncore;
313         int err;
314
315         /*
316          * GEN6_GDRST is not in the gt power well, no need to check
317          * for fifo space for the write or forcewake the chip for
318          * the read
319          */
320         intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
321
322         /* Wait for the device to ack the reset requests */
323         err = __intel_wait_for_register_fw(uncore,
324                                            GEN6_GDRST, hw_domain_mask, 0,
325                                            500, 0,
326                                            NULL);
327         if (err)
328                 DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
329                                  hw_domain_mask);
330
331         return err;
332 }
333
334 static int gen6_reset_engines(struct drm_i915_private *i915,
335                               intel_engine_mask_t engine_mask,
336                               unsigned int retry)
337 {
338         struct intel_engine_cs *engine;
339         const u32 hw_engine_mask[] = {
340                 [RCS0]  = GEN6_GRDOM_RENDER,
341                 [BCS0]  = GEN6_GRDOM_BLT,
342                 [VCS0]  = GEN6_GRDOM_MEDIA,
343                 [VCS1]  = GEN8_GRDOM_MEDIA2,
344                 [VECS0] = GEN6_GRDOM_VECS,
345         };
346         u32 hw_mask;
347
348         if (engine_mask == ALL_ENGINES) {
349                 hw_mask = GEN6_GRDOM_FULL;
350         } else {
351                 intel_engine_mask_t tmp;
352
353                 hw_mask = 0;
354                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
355                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
356                         hw_mask |= hw_engine_mask[engine->id];
357                 }
358         }
359
360         return gen6_hw_domain_reset(i915, hw_mask);
361 }
362
363 static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
364 {
365         struct intel_uncore *uncore = engine->uncore;
366         u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
367         i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
368         u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
369         i915_reg_t sfc_usage;
370         u32 sfc_usage_bit;
371         u32 sfc_reset_bit;
372
373         switch (engine->class) {
374         case VIDEO_DECODE_CLASS:
375                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
376                         return 0;
377
378                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
379                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
380
381                 sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
382                 sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
383
384                 sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
385                 sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
386                 sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
387                 break;
388
389         case VIDEO_ENHANCEMENT_CLASS:
390                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
391                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
392
393                 sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
394                 sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
395
396                 sfc_usage = GEN11_VECS_SFC_USAGE(engine);
397                 sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
398                 sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
399                 break;
400
401         default:
402                 return 0;
403         }
404
405         /*
406          * Tell the engine that a software reset is going to happen. The engine
407          * will then try to force lock the SFC (if currently locked, it will
408          * remain so until we tell the engine it is safe to unlock; if currently
409          * unlocked, it will ignore this and all new lock requests). If SFC
410          * ends up being locked to the engine we want to reset, we have to reset
411          * it as well (we will unlock it once the reset sequence is completed).
412          */
413         rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
414
415         if (__intel_wait_for_register_fw(uncore,
416                                          sfc_forced_lock_ack,
417                                          sfc_forced_lock_ack_bit,
418                                          sfc_forced_lock_ack_bit,
419                                          1000, 0, NULL)) {
420                 DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
421                 return 0;
422         }
423
424         if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
425                 return sfc_reset_bit;
426
427         return 0;
428 }
429
430 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
431 {
432         struct intel_uncore *uncore = engine->uncore;
433         u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
434         i915_reg_t sfc_forced_lock;
435         u32 sfc_forced_lock_bit;
436
437         switch (engine->class) {
438         case VIDEO_DECODE_CLASS:
439                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
440                         return;
441
442                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
443                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
444                 break;
445
446         case VIDEO_ENHANCEMENT_CLASS:
447                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
448                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
449                 break;
450
451         default:
452                 return;
453         }
454
455         rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
456 }
457
458 static int gen11_reset_engines(struct drm_i915_private *i915,
459                                intel_engine_mask_t engine_mask,
460                                unsigned int retry)
461 {
462         const u32 hw_engine_mask[] = {
463                 [RCS0]  = GEN11_GRDOM_RENDER,
464                 [BCS0]  = GEN11_GRDOM_BLT,
465                 [VCS0]  = GEN11_GRDOM_MEDIA,
466                 [VCS1]  = GEN11_GRDOM_MEDIA2,
467                 [VCS2]  = GEN11_GRDOM_MEDIA3,
468                 [VCS3]  = GEN11_GRDOM_MEDIA4,
469                 [VECS0] = GEN11_GRDOM_VECS,
470                 [VECS1] = GEN11_GRDOM_VECS2,
471         };
472         struct intel_engine_cs *engine;
473         intel_engine_mask_t tmp;
474         u32 hw_mask;
475         int ret;
476
477         if (engine_mask == ALL_ENGINES) {
478                 hw_mask = GEN11_GRDOM_FULL;
479         } else {
480                 hw_mask = 0;
481                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
482                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
483                         hw_mask |= hw_engine_mask[engine->id];
484                         hw_mask |= gen11_lock_sfc(engine);
485                 }
486         }
487
488         ret = gen6_hw_domain_reset(i915, hw_mask);
489
490         if (engine_mask != ALL_ENGINES)
491                 for_each_engine_masked(engine, i915, engine_mask, tmp)
492                         gen11_unlock_sfc(engine);
493
494         return ret;
495 }
496
497 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
498 {
499         struct intel_uncore *uncore = engine->uncore;
500         const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
501         u32 request, mask, ack;
502         int ret;
503
504         ack = intel_uncore_read_fw(uncore, reg);
505         if (ack & RESET_CTL_CAT_ERROR) {
506                 /*
507                  * For catastrophic errors, ready-for-reset sequence
508                  * needs to be bypassed: HAS#396813
509                  */
510                 request = RESET_CTL_CAT_ERROR;
511                 mask = RESET_CTL_CAT_ERROR;
512
513                 /* Catastrophic errors need to be cleared by HW */
514                 ack = 0;
515         } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
516                 request = RESET_CTL_REQUEST_RESET;
517                 mask = RESET_CTL_READY_TO_RESET;
518                 ack = RESET_CTL_READY_TO_RESET;
519         } else {
520                 return 0;
521         }
522
523         intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
524         ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
525                                            700, 0, NULL);
526         if (ret)
527                 DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
528                           engine->name, request,
529                           intel_uncore_read_fw(uncore, reg));
530
531         return ret;
532 }
533
534 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
535 {
536         intel_uncore_write_fw(engine->uncore,
537                               RING_RESET_CTL(engine->mmio_base),
538                               _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
539 }
540
541 static int gen8_reset_engines(struct drm_i915_private *i915,
542                               intel_engine_mask_t engine_mask,
543                               unsigned int retry)
544 {
545         struct intel_engine_cs *engine;
546         const bool reset_non_ready = retry >= 1;
547         intel_engine_mask_t tmp;
548         int ret;
549
550         for_each_engine_masked(engine, i915, engine_mask, tmp) {
551                 ret = gen8_engine_reset_prepare(engine);
552                 if (ret && !reset_non_ready)
553                         goto skip_reset;
554
555                 /*
556                  * If this is not the first failed attempt to prepare, we
557                  * choose to proceed anyway.
558                  *
559                  * By doing so we risk context corruption and, on some gens
560                  * (kbl), a possible system hang if the reset happens during
561                  * active batchbuffer execution.
562                  *
563                  * We would rather accept context corruption than a failed
564                  * reset with a wedged driver/GPU. The active batchbuffer
565                  * case should be covered by the i915_stop_engines() call
566                  * made before the reset.
567                  */
568         }
569
570         if (INTEL_GEN(i915) >= 11)
571                 ret = gen11_reset_engines(i915, engine_mask, retry);
572         else
573                 ret = gen6_reset_engines(i915, engine_mask, retry);
574
575 skip_reset:
576         for_each_engine_masked(engine, i915, engine_mask, tmp)
577                 gen8_engine_reset_cancel(engine);
578
579         return ret;
580 }
581
582 typedef int (*reset_func)(struct drm_i915_private *,
583                           intel_engine_mask_t engine_mask,
584                           unsigned int retry);
585
586 static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
587 {
588         if (INTEL_GEN(i915) >= 8)
589                 return gen8_reset_engines;
590         else if (INTEL_GEN(i915) >= 6)
591                 return gen6_reset_engines;
592         else if (INTEL_GEN(i915) >= 5)
593                 return ironlake_do_reset;
594         else if (IS_G4X(i915))
595                 return g4x_do_reset;
596         else if (IS_G33(i915) || IS_PINEVIEW(i915))
597                 return g33_do_reset;
598         else if (INTEL_GEN(i915) >= 3)
599                 return i915_do_reset;
600         else
601                 return NULL;
602 }
603
604 int intel_gpu_reset(struct drm_i915_private *i915,
605                     intel_engine_mask_t engine_mask)
606 {
607         const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
608         reset_func reset;
609         int ret = -ETIMEDOUT;
610         int retry;
611
612         reset = intel_get_gpu_reset(i915);
613         if (!reset)
614                 return -ENODEV;
615
616         /*
617          * If the power well sleeps during the reset, the reset
618          * request may be dropped and never completes (causing -EIO).
619          */
620         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
621         for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
622                 /*
623                  * We stop the engines, otherwise we might get a failed reset
624                  * and a dead gpu (on elk). Even a gpu as modern as kbl can
625                  * suffer a system hang if a batchbuffer is still progressing
626                  * when the reset is issued, regardless of the READY_TO_RESET
627                  * ack. Thus we assume it is best to stop the engines on all
628                  * gens where we have a gpu reset.
629                  *
630                  * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
631                  *
632                  * WaMediaResetMainRingCleanup:ctg,elk (presumably)
633                  *
634                  * FIXME: Wa for more modern gens needs to be validated
635                  */
636                 if (retry)
637                         i915_stop_engines(i915, engine_mask);
638
639                 GEM_TRACE("engine_mask=%x\n", engine_mask);
640                 preempt_disable();
641                 ret = reset(i915, engine_mask, retry);
642                 preempt_enable();
643         }
644         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
645
646         return ret;
647 }
648
649 bool intel_has_gpu_reset(struct drm_i915_private *i915)
650 {
651         if (!i915_modparams.reset)
652                 return false;
653
654         return intel_get_gpu_reset(i915);
655 }
656
657 bool intel_has_reset_engine(struct drm_i915_private *i915)
658 {
659         return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
660 }
661
662 int intel_reset_guc(struct drm_i915_private *i915)
663 {
664         u32 guc_domain =
665                 INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
666         int ret;
667
668         GEM_BUG_ON(!HAS_GUC(i915));
669
670         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
671         ret = gen6_hw_domain_reset(i915, guc_domain);
672         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
673
674         return ret;
675 }
676
677 /*
678  * Ensure the irq handler finishes and does not run again while we
679  * prepare the engine (and hold it out of RC6) for the reset.
680  */
681 static void reset_prepare_engine(struct intel_engine_cs *engine)
682 {
683         /*
684          * During the reset sequence, we must prevent the engine from
685          * entering RC6. As the context state is undefined until we restart
686          * the engine, if it does enter RC6 during the reset, the state
687          * written to the powercontext is undefined and so we may lose
688          * GPU state upon resume, i.e. fail to restart after a reset.
689          */
690         intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
691         engine->reset.prepare(engine);
692 }
693
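/*
 * Zap userspace CPU mmaps of objects held in the GGTT fence registers so
 * that they are refaulted after the reset; the fences themselves are
 * restored later via i915_gem_restore_fences().
 */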
694 static void revoke_mmaps(struct drm_i915_private *i915)
695 {
696         int i;
697
698         for (i = 0; i < i915->ggtt.num_fences; i++) {
699                 struct drm_vma_offset_node *node;
700                 struct i915_vma *vma;
701                 u64 vma_offset;
702
703                 vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
704                 if (!vma)
705                         continue;
706
707                 if (!i915_vma_has_userfault(vma))
708                         continue;
709
710                 GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
711                 node = &vma->obj->base.vma_node;
712                 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
713                 unmap_mapping_range(i915->drm.anon_inode->i_mapping,
714                                     drm_vma_node_offset_addr(node) + vma_offset,
715                                     vma->size,
716                                     1);
717         }
718 }
719
720 static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
721 {
722         struct intel_engine_cs *engine;
723         intel_engine_mask_t awake = 0;
724         enum intel_engine_id id;
725
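        /*
         * Note which engines were already awake; reset_finish() drops only
         * the PM references taken here.
         */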
726         for_each_engine(engine, i915, id) {
727                 if (intel_engine_pm_get_if_awake(engine))
728                         awake |= engine->mask;
729                 reset_prepare_engine(engine);
730         }
731
732         intel_uc_reset_prepare(i915);
733
734         return awake;
735 }
736
737 static void gt_revoke(struct drm_i915_private *i915)
738 {
739         revoke_mmaps(i915);
740 }
741
742 static int gt_reset(struct drm_i915_private *i915,
743                     intel_engine_mask_t stalled_mask)
744 {
745         struct intel_engine_cs *engine;
746         enum intel_engine_id id;
747         int err;
748
749         /*
750          * Everything depends on having the GTT running, so we need to start
751          * there.
752          */
753         err = i915_ggtt_enable_hw(i915);
754         if (err)
755                 return err;
756
757         for_each_engine(engine, i915, id)
758                 intel_engine_reset(engine, stalled_mask & engine->mask);
759
760         i915_gem_restore_fences(i915);
761
762         return err;
763 }
764
765 static void reset_finish_engine(struct intel_engine_cs *engine)
766 {
767         engine->reset.finish(engine);
768         intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
769
770         intel_engine_signal_breadcrumbs(engine);
771 }
772
773 static void reset_finish(struct drm_i915_private *i915,
774                          intel_engine_mask_t awake)
775 {
776         struct intel_engine_cs *engine;
777         enum intel_engine_id id;
778
779         for_each_engine(engine, i915, id) {
780                 reset_finish_engine(engine);
781                 if (awake & engine->mask)
782                         intel_engine_pm_put(engine);
783         }
784 }
785
786 static void nop_submit_request(struct i915_request *request)
787 {
788         struct intel_engine_cs *engine = request->engine;
789         unsigned long flags;
790
791         GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
792                   engine->name, request->fence.context, request->fence.seqno);
793         dma_fence_set_error(&request->fence, -EIO);
794
795         spin_lock_irqsave(&engine->active.lock, flags);
796         __i915_request_submit(request);
797         i915_request_mark_complete(request);
798         spin_unlock_irqrestore(&engine->active.lock, flags);
799
800         intel_engine_queue_breadcrumbs(engine);
801 }
802
803 static void __i915_gem_set_wedged(struct drm_i915_private *i915)
804 {
805         struct i915_gpu_error *error = &i915->gpu_error;
806         struct intel_engine_cs *engine;
807         intel_engine_mask_t awake;
808         enum intel_engine_id id;
809
810         if (test_bit(I915_WEDGED, &error->flags))
811                 return;
812
813         if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
814                 struct drm_printer p = drm_debug_printer(__func__);
815
816                 for_each_engine(engine, i915, id)
817                         intel_engine_dump(engine, &p, "%s\n", engine->name);
818         }
819
820         GEM_TRACE("start\n");
821
822         /*
823          * First, stop submission to hw, but do not yet complete requests by
824          * rolling the global seqno forward (since this would complete requests
825          * for which we haven't set the fence error to EIO yet).
826          */
827         awake = reset_prepare(i915);
828
829         /* Even if the GPU reset fails, it should still stop the engines */
830         if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
831                 intel_gpu_reset(i915, ALL_ENGINES);
832
833         for_each_engine(engine, i915, id) {
834                 engine->submit_request = nop_submit_request;
835                 engine->schedule = NULL;
836         }
837         i915->caps.scheduler = 0;
838
839         /*
840          * Make sure no request can slip through without getting completed by
841          * either the cancellation of requests below (once the wedged bit
842          * is set) or the nop_submit_request handler installed above.
843          */
844         synchronize_rcu_expedited();
845         set_bit(I915_WEDGED, &error->flags);
846
847         /* Mark all executing requests as skipped */
848         for_each_engine(engine, i915, id)
849                 engine->cancel_requests(engine);
850
851         reset_finish(i915, awake);
852
853         GEM_TRACE("end\n");
854 }
855
856 void i915_gem_set_wedged(struct drm_i915_private *i915)
857 {
858         struct i915_gpu_error *error = &i915->gpu_error;
859         intel_wakeref_t wakeref;
860
861         mutex_lock(&error->wedge_mutex);
862         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
863                 __i915_gem_set_wedged(i915);
864         mutex_unlock(&error->wedge_mutex);
865 }
866
867 static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
868 {
869         struct i915_gpu_error *error = &i915->gpu_error;
870         struct i915_timeline *tl;
871
872         if (!test_bit(I915_WEDGED, &error->flags))
873                 return true;
874
875         if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
876                 return false;
877
878         GEM_TRACE("start\n");
879
880         /*
881          * Before unwedging, make sure that all pending operations
882          * are flushed and errored out - we may have requests waiting upon
883          * third party fences. We marked all inflight requests as EIO, and
884          * every execbuf since returned EIO, for consistency we want all
885          * the currently pending requests to also be marked as EIO, which
886          * is done inside our nop_submit_request - and so we must wait.
887          *
888          * No more can be submitted until we reset the wedged bit.
889          */
890         mutex_lock(&i915->gt.timelines.mutex);
891         list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
892                 struct i915_request *rq;
893
894                 rq = i915_active_request_get_unlocked(&tl->last_request);
895                 if (!rq)
896                         continue;
897
898                 /*
899                  * All internal dependencies (i915_requests) will have
900                  * been flushed by the set-wedge, but we may be stuck waiting
901                  * for external fences. These should all be capped to 10s
902                  * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
903                  * in the worst case.
904                  */
905                 dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
906                 i915_request_put(rq);
907         }
908         mutex_unlock(&i915->gt.timelines.mutex);
909
910         intel_gt_sanitize(i915, false);
911
912         /*
913          * Undo nop_submit_request. We prevent all new i915 requests from
914          * being queued (by disallowing execbuf whilst wedged) so having
915          * waited for all active requests above, we know the system is idle
916          * and do not have to worry about a thread being inside
917          * engine->submit_request() as we swap over. So unlike installing
918          * the nop_submit_request on reset, we can do this from normal
919          * context and do not require stop_machine().
920          */
921         intel_engines_reset_default_submission(i915);
922
923         GEM_TRACE("end\n");
924
925         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
926         clear_bit(I915_WEDGED, &i915->gpu_error.flags);
927
928         return true;
929 }
930
931 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
932 {
933         struct i915_gpu_error *error = &i915->gpu_error;
934         bool result;
935
936         mutex_lock(&error->wedge_mutex);
937         result = __i915_gem_unset_wedged(i915);
938         mutex_unlock(&error->wedge_mutex);
939
940         return result;
941 }
942
943 static int do_reset(struct drm_i915_private *i915,
944                     intel_engine_mask_t stalled_mask)
945 {
946         int err, i;
947
948         gt_revoke(i915);
949
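        /* Retry the reset a few times, backing off 10/20/30 ms between attempts. */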
950         err = intel_gpu_reset(i915, ALL_ENGINES);
951         for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
952                 msleep(10 * (i + 1));
953                 err = intel_gpu_reset(i915, ALL_ENGINES);
954         }
955         if (err)
956                 return err;
957
958         return gt_reset(i915, stalled_mask);
959 }
960
961 static int resume(struct drm_i915_private *i915)
962 {
963         struct intel_engine_cs *engine;
964         enum intel_engine_id id;
965         int ret;
966
967         for_each_engine(engine, i915, id) {
968                 ret = engine->resume(engine);
969                 if (ret)
970                         return ret;
971         }
972
973         return 0;
974 }
975
976 /**
977  * i915_reset - reset chip after a hang
978  * @i915: #drm_i915_private to reset
979  * @stalled_mask: mask of the stalled engines with the guilty requests
980  * @reason: user error message for why we are resetting
981  *
982  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
983  * on failure.
984  *
985  * Procedure is fairly simple:
986  *   - reset the chip using the reset reg
987  *   - re-init context state
988  *   - re-init hardware status page
989  *   - re-init ring buffer
990  *   - re-init interrupt state
991  *   - re-init display
992  */
993 void i915_reset(struct drm_i915_private *i915,
994                 intel_engine_mask_t stalled_mask,
995                 const char *reason)
996 {
997         struct i915_gpu_error *error = &i915->gpu_error;
998         intel_engine_mask_t awake;
999         int ret;
1000
1001         GEM_TRACE("flags=%lx\n", error->flags);
1002
1003         might_sleep();
1004         GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
1005         mutex_lock(&error->wedge_mutex);
1006
1007         /* Clear any previous failed attempts at recovery. Time to try again. */
1008         if (!__i915_gem_unset_wedged(i915))
1009                 goto unlock;
1010
1011         if (reason)
1012                 dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
1013         error->reset_count++;
1014
1015         awake = reset_prepare(i915);
1016
1017         if (!intel_has_gpu_reset(i915)) {
1018                 if (i915_modparams.reset)
1019                         dev_err(i915->drm.dev, "GPU reset not supported\n");
1020                 else
1021                         DRM_DEBUG_DRIVER("GPU reset disabled\n");
1022                 goto error;
1023         }
1024
1025         if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1026                 intel_runtime_pm_disable_interrupts(i915);
1027
1028         if (do_reset(i915, stalled_mask)) {
1029                 dev_err(i915->drm.dev, "Failed to reset chip\n");
1030                 goto taint;
1031         }
1032
1033         if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1034                 intel_runtime_pm_enable_interrupts(i915);
1035
1036         intel_overlay_reset(i915);
1037
1038         /*
1039          * Next we need to restore the context, but we don't use those
1040          * yet either...
1041          *
1042          * Ring buffer needs to be re-initialized in the KMS case, or if X
1043          * was running at the time of the reset (i.e. we weren't VT
1044          * switched away).
1045          */
1046         ret = i915_gem_init_hw(i915);
1047         if (ret) {
1048                 DRM_ERROR("Failed to initialise HW following reset (%d)\n",
1049                           ret);
1050                 goto taint;
1051         }
1052
1053         ret = resume(i915);
1054         if (ret)
1055                 goto taint;
1056
1057         i915_queue_hangcheck(i915);
1058
1059 finish:
1060         reset_finish(i915, awake);
1061 unlock:
1062         mutex_unlock(&error->wedge_mutex);
1063         return;
1064
1065 taint:
1066         /*
1067          * History tells us that if we cannot reset the GPU now, we
1068          * never will. This then impacts everything that is run
1069          * subsequently. On failing the reset, we mark the driver
1070          * as wedged, preventing further execution on the GPU.
1071          * We also want to go one step further and add a taint to the
1072          * kernel so that any subsequent faults can be traced back to
1073          * this failure. This is important for CI, where if the
1074          * GPU/driver fails we would like to reboot and restart testing
1075          * rather than continue on into oblivion. For everyone else,
1076          * the system should still plod along, but they have been warned!
1077          */
1078         add_taint_for_CI(TAINT_WARN);
1079 error:
1080         __i915_gem_set_wedged(i915);
1081         goto finish;
1082 }
1083
1084 static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
1085                                         struct intel_engine_cs *engine)
1086 {
1087         return intel_gpu_reset(i915, engine->mask);
1088 }
1089
1090 /**
1091  * i915_reset_engine - reset GPU engine to recover from a hang
1092  * @engine: engine to reset
1093  * @msg: reason for GPU reset; or NULL for no dev_notice()
1094  *
1095  * Reset a specific GPU engine. Useful if a hang is detected.
1096  * Returns zero on successful reset or otherwise an error code.
1097  *
1098  * Procedure is:
1099  *  - identify the request that caused the hang and drop it
1100  *  - reset the engine (which will force the engine to idle)
1101  *  - re-init/configure the engine
1102  */
1103 int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
1104 {
1105         struct i915_gpu_error *error = &engine->i915->gpu_error;
1106         int ret;
1107
1108         GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
1109         GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
1110
1111         if (!intel_engine_pm_get_if_awake(engine))
1112                 return 0;
1113
1114         reset_prepare_engine(engine);
1115
1116         if (msg)
1117                 dev_notice(engine->i915->drm.dev,
1118                            "Resetting %s for %s\n", engine->name, msg);
1119         error->reset_engine_count[engine->id]++;
1120
1121         if (!engine->i915->guc.execbuf_client)
1122                 ret = intel_gt_reset_engine(engine->i915, engine);
1123         else
1124                 ret = intel_guc_reset_engine(&engine->i915->guc, engine);
1125         if (ret) {
1126                 /* If we fail here, we expect to fallback to a global reset */
1127                 DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
1128                                  engine->i915->guc.execbuf_client ? "GuC " : "",
1129                                  engine->name, ret);
1130                 goto out;
1131         }
1132
1133         /*
1134          * The request that caused the hang is stuck on elsp; we know the
1135          * active request and can drop it. Adjust HEAD to skip the offending
1136          * request and resume executing the remaining requests in the queue.
1137          */
1138         intel_engine_reset(engine, true);
1139
1140         /*
1141          * The engine and its registers (and workarounds in case of render)
1142          * have been reset to their default values. Follow the init_ring
1143          * process to program RING_MODE, HWSP and re-enable submission.
1144          */
1145         ret = engine->resume(engine);
1146
1147 out:
1148         intel_engine_cancel_stop_cs(engine);
1149         reset_finish_engine(engine);
1150         intel_engine_pm_put(engine);
1151         return ret;
1152 }
1153
1154 static void i915_reset_device(struct drm_i915_private *i915,
1155                               u32 engine_mask,
1156                               const char *reason)
1157 {
1158         struct i915_gpu_error *error = &i915->gpu_error;
1159         struct kobject *kobj = &i915->drm.primary->kdev->kobj;
1160         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1161         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1162         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1163         struct i915_wedge_me w;
1164
1165         kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1166
1167         DRM_DEBUG_DRIVER("resetting chip\n");
1168         kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1169
1170         /* Use a watchdog to ensure that our reset completes */
1171         i915_wedge_on_timeout(&w, i915, 5 * HZ) {
1172                 intel_prepare_reset(i915);
1173
1174                 /* Flush everyone using a resource about to be clobbered */
1175                 synchronize_srcu_expedited(&error->reset_backoff_srcu);
1176
1177                 i915_reset(i915, engine_mask, reason);
1178
1179                 intel_finish_reset(i915);
1180         }
1181
1182         if (!test_bit(I915_WEDGED, &error->flags))
1183                 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1184 }
1185
1186 static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
1187 {
1188         intel_uncore_rmw(uncore, reg, 0, 0);
1189 }
1190
1191 static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
1192 {
1193         GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
1194         GEN6_RING_FAULT_REG_POSTING_READ(engine);
1195 }
1196
1197 static void clear_error_registers(struct drm_i915_private *i915,
1198                                   intel_engine_mask_t engine_mask)
1199 {
1200         struct intel_uncore *uncore = &i915->uncore;
1201         u32 eir;
1202
1203         if (!IS_GEN(i915, 2))
1204                 clear_register(uncore, PGTBL_ER);
1205
1206         if (INTEL_GEN(i915) < 4)
1207                 clear_register(uncore, IPEIR(RENDER_RING_BASE));
1208         else
1209                 clear_register(uncore, IPEIR_I965);
1210
1211         clear_register(uncore, EIR);
1212         eir = intel_uncore_read(uncore, EIR);
1213         if (eir) {
1214                 /*
1215                  * Some errors might have become stuck;
1216                  * mask them.
1217                  */
1218                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
1219                 rmw_set(uncore, EMR, eir);
1220                 intel_uncore_write(uncore, GEN2_IIR,
1221                                    I915_MASTER_ERROR_INTERRUPT);
1222         }
1223
1224         if (INTEL_GEN(i915) >= 8) {
1225                 rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
1226                 intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
1227         } else if (INTEL_GEN(i915) >= 6) {
1228                 struct intel_engine_cs *engine;
1229                 enum intel_engine_id id;
1230
1231                 for_each_engine_masked(engine, i915, engine_mask, id)
1232                         gen8_clear_engine_error_register(engine);
1233         }
1234 }
1235
1236 static void gen6_check_faults(struct drm_i915_private *dev_priv)
1237 {
1238         struct intel_engine_cs *engine;
1239         enum intel_engine_id id;
1240         u32 fault;
1241
1242         for_each_engine(engine, dev_priv, id) {
1243                 fault = GEN6_RING_FAULT_REG_READ(engine);
1244                 if (fault & RING_FAULT_VALID) {
1245                         DRM_DEBUG_DRIVER("Unexpected fault\n"
1246                                          "\tAddr: 0x%08lx\n"
1247                                          "\tAddress space: %s\n"
1248                                          "\tSource ID: %d\n"
1249                                          "\tType: %d\n",
1250                                          fault & PAGE_MASK,
1251                                          fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
1252                                          RING_FAULT_SRCID(fault),
1253                                          RING_FAULT_FAULT_TYPE(fault));
1254                 }
1255         }
1256 }
1257
1258 static void gen8_check_faults(struct drm_i915_private *dev_priv)
1259 {
1260         u32 fault = I915_READ(GEN8_RING_FAULT_REG);
1261
1262         if (fault & RING_FAULT_VALID) {
1263                 u32 fault_data0, fault_data1;
1264                 u64 fault_addr;
1265
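                /*
                 * Reassemble the faulting address: DATA0 carries VA[43:12],
                 * DATA1 supplies the bits above bit 43.
                 */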
1266                 fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
1267                 fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
1268                 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
1269                              ((u64)fault_data0 << 12);
1270
1271                 DRM_DEBUG_DRIVER("Unexpected fault\n"
1272                                  "\tAddr: 0x%08x_%08x\n"
1273                                  "\tAddress space: %s\n"
1274                                  "\tEngine ID: %d\n"
1275                                  "\tSource ID: %d\n"
1276                                  "\tType: %d\n",
1277                                  upper_32_bits(fault_addr),
1278                                  lower_32_bits(fault_addr),
1279                                  fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
1280                                  GEN8_RING_FAULT_ENGINE_ID(fault),
1281                                  RING_FAULT_SRCID(fault),
1282                                  RING_FAULT_FAULT_TYPE(fault));
1283         }
1284 }
1285
1286 void i915_check_and_clear_faults(struct drm_i915_private *i915)
1287 {
1288         /* From GEN8 onwards we only have one 'All Engine Fault Register' */
1289         if (INTEL_GEN(i915) >= 8)
1290                 gen8_check_faults(i915);
1291         else if (INTEL_GEN(i915) >= 6)
1292                 gen6_check_faults(i915);
1293         else
1294                 return;
1295
1296         clear_error_registers(i915, ALL_ENGINES);
1297 }
1298
1299 /**
1300  * i915_handle_error - handle a gpu error
1301  * @i915: i915 device private
1302  * @engine_mask: mask representing engines that are hung
1303  * @flags: control flags
1304  * @fmt: Error message format string
1305  *
1306  * Do some basic checking of register state at error time and
1307  * dump it to the syslog.  Also call i915_capture_error_state() to make
1308  * sure we get a record and make it available in debugfs.  Fire a uevent
1309  * so userspace knows something bad happened (should trigger collection
1310  * of a ring dump etc.).
1311  */
1312 void i915_handle_error(struct drm_i915_private *i915,
1313                        intel_engine_mask_t engine_mask,
1314                        unsigned long flags,
1315                        const char *fmt, ...)
1316 {
1317         struct i915_gpu_error *error = &i915->gpu_error;
1318         struct intel_engine_cs *engine;
1319         intel_wakeref_t wakeref;
1320         intel_engine_mask_t tmp;
1321         char error_msg[80];
1322         char *msg = NULL;
1323
1324         if (fmt) {
1325                 va_list args;
1326
1327                 va_start(args, fmt);
1328                 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1329                 va_end(args);
1330
1331                 msg = error_msg;
1332         }
1333
1334         /*
1335          * In most cases it's guaranteed that we get here with an RPM
1336          * reference held, for example because there is a pending GPU
1337          * request that won't finish until the reset is done. This
1338          * isn't the case at least when we get here by doing a
1339          * simulated reset via debugfs, so get an RPM reference.
1340          */
1341         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1342
1343         engine_mask &= INTEL_INFO(i915)->engine_mask;
1344
1345         if (flags & I915_ERROR_CAPTURE) {
1346                 i915_capture_error_state(i915, engine_mask, msg);
1347                 clear_error_registers(i915, engine_mask);
1348         }
1349
1350         /*
1351          * Try engine reset when available. We fall back to full reset if
1352          * single reset fails.
1353          */
1354         if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
1355                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
1356                         BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1357                         if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1358                                              &error->flags))
1359                                 continue;
1360
1361                         if (i915_reset_engine(engine, msg) == 0)
1362                                 engine_mask &= ~engine->mask;
1363
1364                         clear_bit(I915_RESET_ENGINE + engine->id,
1365                                   &error->flags);
1366                         wake_up_bit(&error->flags,
1367                                     I915_RESET_ENGINE + engine->id);
1368                 }
1369         }
1370
1371         if (!engine_mask)
1372                 goto out;
1373
1374         /* Full reset needs the mutex, stop any other user trying to do so. */
1375         if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
1376                 wait_event(error->reset_queue,
1377                            !test_bit(I915_RESET_BACKOFF, &error->flags));
1378                 goto out; /* piggy-back on the other reset */
1379         }
1380
1381         /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1382         synchronize_rcu_expedited();
1383
1384         /* Prevent any other reset-engine attempt. */
1385         for_each_engine(engine, i915, tmp) {
1386                 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1387                                         &error->flags))
1388                         wait_on_bit(&error->flags,
1389                                     I915_RESET_ENGINE + engine->id,
1390                                     TASK_UNINTERRUPTIBLE);
1391         }
1392
1393         i915_reset_device(i915, engine_mask, msg);
1394
1395         for_each_engine(engine, i915, tmp) {
1396                 clear_bit(I915_RESET_ENGINE + engine->id,
1397                           &error->flags);
1398         }
1399
1400         clear_bit(I915_RESET_BACKOFF, &error->flags);
1401         wake_up_all(&error->reset_queue);
1402
1403 out:
1404         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1405 }
1406
1407 int i915_reset_trylock(struct drm_i915_private *i915)
1408 {
1409         struct i915_gpu_error *error = &i915->gpu_error;
1410         int srcu;
1411
1412         might_lock(&error->reset_backoff_srcu);
1413         might_sleep();
1414
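        /*
         * Wait for any global reset to finish, then enter the
         * reset_backoff_srcu read-side section; i915_reset_device() flushes
         * such readers with synchronize_srcu_expedited() before resetting.
         */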
1415         rcu_read_lock();
1416         while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
1417                 rcu_read_unlock();
1418
1419                 if (wait_event_interruptible(error->reset_queue,
1420                                              !test_bit(I915_RESET_BACKOFF,
1421                                                        &error->flags)))
1422                         return -EINTR;
1423
1424                 rcu_read_lock();
1425         }
1426         srcu = srcu_read_lock(&error->reset_backoff_srcu);
1427         rcu_read_unlock();
1428
1429         return srcu;
1430 }
1431
1432 void i915_reset_unlock(struct drm_i915_private *i915, int tag)
1433 __releases(&i915->gpu_error.reset_backoff_srcu)
1434 {
1435         struct i915_gpu_error *error = &i915->gpu_error;
1436
1437         srcu_read_unlock(&error->reset_backoff_srcu, tag);
1438 }
1439
1440 int i915_terminally_wedged(struct drm_i915_private *i915)
1441 {
1442         struct i915_gpu_error *error = &i915->gpu_error;
1443
1444         might_sleep();
1445
1446         if (!__i915_wedged(error))
1447                 return 0;
1448
1449         /* Reset still in progress? Maybe we will recover? */
1450         if (!test_bit(I915_RESET_BACKOFF, &error->flags))
1451                 return -EIO;
1452
1453         /* XXX intel_reset_finish() still takes struct_mutex!!! */
1454         if (mutex_is_locked(&i915->drm.struct_mutex))
1455                 return -EAGAIN;
1456
1457         if (wait_event_interruptible(error->reset_queue,
1458                                      !test_bit(I915_RESET_BACKOFF,
1459                                                &error->flags)))
1460                 return -EINTR;
1461
1462         return __i915_wedged(error) ? -EIO : 0;
1463 }
1464
1465 static void i915_wedge_me(struct work_struct *work)
1466 {
1467         struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
1468
1469         dev_err(w->i915->drm.dev,
1470                 "%s timed out, cancelling all in-flight rendering.\n",
1471                 w->name);
1472         i915_gem_set_wedged(w->i915);
1473 }
1474
1475 void __i915_init_wedge(struct i915_wedge_me *w,
1476                        struct drm_i915_private *i915,
1477                        long timeout,
1478                        const char *name)
1479 {
1480         w->i915 = i915;
1481         w->name = name;
1482
1483         INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
1484         schedule_delayed_work(&w->work, timeout);
1485 }
1486
1487 void __i915_fini_wedge(struct i915_wedge_me *w)
1488 {
1489         cancel_delayed_work_sync(&w->work);
1490         destroy_delayed_work_on_stack(&w->work);
1491         w->i915 = NULL;
1492 }
1493
1494 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1495 #include "selftest_reset.c"
1496 #endif