1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <linux/pm_runtime.h>
25 #include <asm/iosf_mbi.h>
26
27 #include "i915_drv.h"
28 #include "i915_trace.h"
29 #include "i915_vgpu.h"
30 #include "intel_pm.h"
31
32 #define FORCEWAKE_ACK_TIMEOUT_MS 50
33 #define GT_FIFO_TIMEOUT_MS       10
34
35 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
36
37 void
38 intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
39 {
40         spin_lock_init(&mmio_debug->lock);
41         mmio_debug->unclaimed_mmio_check = 1;
42 }
43
44 static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
45 {
46         lockdep_assert_held(&mmio_debug->lock);
47
48         /* Save and disable mmio debugging for the user bypass */
49         if (!mmio_debug->suspend_count++) {
50                 mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
51                 mmio_debug->unclaimed_mmio_check = 0;
52         }
53 }
54
55 static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
56 {
57         lockdep_assert_held(&mmio_debug->lock);
58
59         if (!--mmio_debug->suspend_count)
60                 mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
61 }
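
/*
 * Illustrative sketch (not a real caller): suspend/resume nest via
 * suspend_count. Only the outermost suspend saves and disables the
 * unclaimed-mmio check, and only the matching outermost resume restores
 * it, so overlapping bypasses are safe:
 *
 *	spin_lock(&mmio_debug->lock);
 *	mmio_debug_suspend(mmio_debug);
 *	mmio_debug_suspend(mmio_debug);
 *	mmio_debug_resume(mmio_debug);
 *	mmio_debug_resume(mmio_debug);
 *	spin_unlock(&mmio_debug->lock);
 */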
62
63 static const char * const forcewake_domain_names[] = {
64         "render",
65         "blitter",
66         "media",
67         "vdbox0",
68         "vdbox1",
69         "vdbox2",
70         "vdbox3",
71         "vebox0",
72         "vebox1",
73 };
74
75 const char *
76 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
77 {
78         BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
79
80         if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
81                 return forcewake_domain_names[id];
82
83         WARN_ON(id);
84
85         return "unknown";
86 }
87
88 #define fw_ack(d) readl((d)->reg_ack)
89 #define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
90 #define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
91
92 static inline void
93 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
94 {
95         /*
96          * We don't really know if the powerwell for the forcewake domain we
97          * are trying to reset here exists at this point (engines could be
98          * fused off in ICL+), so don't wait for acks.
99          */
100         /* WaRsClearFWBitsAtReset:bdw,skl */
101         fw_clear(d, 0xffff);
102 }
103
104 static inline void
105 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
106 {
107         GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
108         d->uncore->fw_domains_timer |= d->mask;
109         d->wake_count++;
110         hrtimer_start_range_ns(&d->timer,
111                                NSEC_PER_MSEC,
112                                NSEC_PER_MSEC,
113                                HRTIMER_MODE_REL);
114 }
115
116 static inline int
117 __wait_for_ack(const struct intel_uncore_forcewake_domain *d,
118                const u32 ack,
119                const u32 value)
120 {
121         return wait_for_atomic((fw_ack(d) & ack) == value,
122                                FORCEWAKE_ACK_TIMEOUT_MS);
123 }
124
125 static inline int
126 wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
127                const u32 ack)
128 {
129         return __wait_for_ack(d, ack, 0);
130 }
131
132 static inline int
133 wait_ack_set(const struct intel_uncore_forcewake_domain *d,
134              const u32 ack)
135 {
136         return __wait_for_ack(d, ack, ack);
137 }
138
139 static inline void
140 fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
141 {
142         if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
143                 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
144                           intel_uncore_forcewake_domain_to_str(d->id));
145                 add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
146         }
147 }
148
149 enum ack_type {
150         ACK_CLEAR = 0,
151         ACK_SET
152 };
153
154 static int
155 fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
156                                  const enum ack_type type)
157 {
158         const u32 ack_bit = FORCEWAKE_KERNEL;
159         const u32 value = type == ACK_SET ? ack_bit : 0;
160         unsigned int pass;
161         bool ack_detected;
162
163         /*
164          * The driver's wake request may collide with the hardware's own
165          * wake requests, which can cause the hardware to not deliver the
166          * driver's ack message.
167          *
168          * Use a fallback bit toggle to kick the gpu state machine
169          * in the hope that the original ack will be delivered along with
170          * the fallback ack.
171          *
172          * This workaround is described in HSDES #1604254524 and it's known as:
173          * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
174          * although the name is a bit misleading.
175          */
176
177         pass = 1;
178         do {
179                 wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
180
181                 fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
182                 /* Give gt some time to relax before the polling frenzy */
183                 udelay(10 * pass);
184                 wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
185
186                 ack_detected = (fw_ack(d) & ack_bit) == value;
187
188                 fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
189         } while (!ack_detected && pass++ < 10);
190
191         DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
192                          intel_uncore_forcewake_domain_to_str(d->id),
193                          type == ACK_SET ? "set" : "clear",
194                          fw_ack(d),
195                          pass);
196
197         return ack_detected ? 0 : -ETIMEDOUT;
198 }
199
200 static inline void
201 fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
202 {
203         if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
204                 return;
205
206         if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
207                 fw_domain_wait_ack_clear(d);
208 }
209
210 static inline void
211 fw_domain_get(const struct intel_uncore_forcewake_domain *d)
212 {
213         fw_set(d, FORCEWAKE_KERNEL);
214 }
215
216 static inline void
217 fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
218 {
219         if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
220                 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
221                           intel_uncore_forcewake_domain_to_str(d->id));
222                 add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
223         }
224 }
225
226 static inline void
227 fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
228 {
229         if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
230                 return;
231
232         if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
233                 fw_domain_wait_ack_set(d);
234 }
235
236 static inline void
237 fw_domain_put(const struct intel_uncore_forcewake_domain *d)
238 {
239         fw_clear(d, FORCEWAKE_KERNEL);
240 }
241
242 static void
243 fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
244 {
245         struct intel_uncore_forcewake_domain *d;
246         unsigned int tmp;
247
248         GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
249
250         for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
251                 fw_domain_wait_ack_clear(d);
252                 fw_domain_get(d);
253         }
254
255         for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
256                 fw_domain_wait_ack_set(d);
257
258         uncore->fw_domains_active |= fw_domains;
259 }
260
261 static void
262 fw_domains_get_with_fallback(struct intel_uncore *uncore,
263                              enum forcewake_domains fw_domains)
264 {
265         struct intel_uncore_forcewake_domain *d;
266         unsigned int tmp;
267
268         GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
269
270         for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
271                 fw_domain_wait_ack_clear_fallback(d);
272                 fw_domain_get(d);
273         }
274
275         for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
276                 fw_domain_wait_ack_set_fallback(d);
277
278         uncore->fw_domains_active |= fw_domains;
279 }
280
281 static void
282 fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
283 {
284         struct intel_uncore_forcewake_domain *d;
285         unsigned int tmp;
286
287         GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
288
289         for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
290                 fw_domain_put(d);
291
292         uncore->fw_domains_active &= ~fw_domains;
293 }
294
295 static void
296 fw_domains_reset(struct intel_uncore *uncore,
297                  enum forcewake_domains fw_domains)
298 {
299         struct intel_uncore_forcewake_domain *d;
300         unsigned int tmp;
301
302         if (!fw_domains)
303                 return;
304
305         GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
306
307         for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
308                 fw_domain_reset(d);
309 }
310
311 static inline u32 gt_thread_status(struct intel_uncore *uncore)
312 {
313         u32 val;
314
315         val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
316         val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
317
318         return val;
319 }
320
321 static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
322 {
323         /*
324          * w/a for a sporadic read returning 0 by waiting for the GT
325          * thread to wake up.
326          */
327         WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
328                   "GT thread status wait timed out\n");
329 }
330
331 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
332                                               enum forcewake_domains fw_domains)
333 {
334         fw_domains_get(uncore, fw_domains);
335
336         /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
337         __gen6_gt_wait_for_thread_c0(uncore);
338 }
339
340 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
341 {
342         u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
343
344         return count & GT_FIFO_FREE_ENTRIES_MASK;
345 }
346
347 static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
348 {
349         u32 n;
350
351         /* On VLV, the FIFO is shared by both SW and HW,
352          * so we need to read FREE_ENTRIES every time. */
353         if (IS_VALLEYVIEW(uncore->i915))
354                 n = fifo_free_entries(uncore);
355         else
356                 n = uncore->fifo_count;
357
358         if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
359                 if (wait_for_atomic((n = fifo_free_entries(uncore)) >
360                                     GT_FIFO_NUM_RESERVED_ENTRIES,
361                                     GT_FIFO_TIMEOUT_MS)) {
362                         DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
363                         return;
364                 }
365         }
366
367         uncore->fifo_count = n - 1;
368 }
369
370 static enum hrtimer_restart
371 intel_uncore_fw_release_timer(struct hrtimer *timer)
372 {
373         struct intel_uncore_forcewake_domain *domain =
374                container_of(timer, struct intel_uncore_forcewake_domain, timer);
375         struct intel_uncore *uncore = domain->uncore;
376         unsigned long irqflags;
377
378         assert_rpm_device_not_suspended(uncore->rpm);
379
380         if (xchg(&domain->active, false))
381                 return HRTIMER_RESTART;
382
383         spin_lock_irqsave(&uncore->lock, irqflags);
384
385         uncore->fw_domains_timer &= ~domain->mask;
386
387         GEM_BUG_ON(!domain->wake_count);
388         if (--domain->wake_count == 0)
389                 uncore->funcs.force_wake_put(uncore, domain->mask);
390
391         spin_unlock_irqrestore(&uncore->lock, irqflags);
392
393         return HRTIMER_NORESTART;
394 }
395
396 /* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
397 static unsigned int
398 intel_uncore_forcewake_reset(struct intel_uncore *uncore)
399 {
400         unsigned long irqflags;
401         struct intel_uncore_forcewake_domain *domain;
402         int retry_count = 100;
403         enum forcewake_domains fw, active_domains;
404
405         iosf_mbi_assert_punit_acquired();
406
407         /* Hold uncore.lock across reset to prevent any register access
408          * with forcewake not set correctly. Wait until all pending
409          * timers have run before taking the lock.
410          */
411         while (1) {
412                 unsigned int tmp;
413
414                 active_domains = 0;
415
416                 for_each_fw_domain(domain, uncore, tmp) {
417                         smp_store_mb(domain->active, false);
418                         if (hrtimer_cancel(&domain->timer) == 0)
419                                 continue;
420
421                         intel_uncore_fw_release_timer(&domain->timer);
422                 }
423
424                 spin_lock_irqsave(&uncore->lock, irqflags);
425
426                 for_each_fw_domain(domain, uncore, tmp) {
427                         if (hrtimer_active(&domain->timer))
428                                 active_domains |= domain->mask;
429                 }
430
431                 if (active_domains == 0)
432                         break;
433
434                 if (--retry_count == 0) {
435                         DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
436                         break;
437                 }
438
439                 spin_unlock_irqrestore(&uncore->lock, irqflags);
440                 cond_resched();
441         }
442
443         WARN_ON(active_domains);
444
445         fw = uncore->fw_domains_active;
446         if (fw)
447                 uncore->funcs.force_wake_put(uncore, fw);
448
449         fw_domains_reset(uncore, uncore->fw_domains);
450         assert_forcewakes_inactive(uncore);
451
452         spin_unlock_irqrestore(&uncore->lock, irqflags);
453
454         return fw; /* track the lost user forcewake domains */
455 }
456
457 static bool
458 fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
459 {
460         u32 dbg;
461
462         dbg = __raw_uncore_read32(uncore, FPGA_DBG);
463         if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
464                 return false;
465
466         __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
467
468         return true;
469 }
470
471 static bool
472 vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
473 {
474         u32 cer;
475
476         cer = __raw_uncore_read32(uncore, CLAIM_ER);
477         if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
478                 return false;
479
480         __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
481
482         return true;
483 }
484
485 static bool
486 gen6_check_for_fifo_debug(struct intel_uncore *uncore)
487 {
488         u32 fifodbg;
489
490         fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
491
492         if (unlikely(fifodbg)) {
493                 DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
494                 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
495         }
496
497         return fifodbg;
498 }
499
500 static bool
501 check_for_unclaimed_mmio(struct intel_uncore *uncore)
502 {
503         bool ret = false;
504
505         lockdep_assert_held(&uncore->debug->lock);
506
507         if (uncore->debug->suspend_count)
508                 return false;
509
510         if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
511                 ret |= fpga_check_for_unclaimed_mmio(uncore);
512
513         if (intel_uncore_has_dbg_unclaimed(uncore))
514                 ret |= vlv_check_for_unclaimed_mmio(uncore);
515
516         if (intel_uncore_has_fifo(uncore))
517                 ret |= gen6_check_for_fifo_debug(uncore);
518
519         return ret;
520 }
521
522 static void forcewake_early_sanitize(struct intel_uncore *uncore,
523                                      unsigned int restore_forcewake)
524 {
525         GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
526
527         /* WaDisableShadowRegForCpd:chv */
528         if (IS_CHERRYVIEW(uncore->i915)) {
529                 __raw_uncore_write32(uncore, GTFIFOCTL,
530                                      __raw_uncore_read32(uncore, GTFIFOCTL) |
531                                      GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
532                                      GT_FIFO_CTL_RC6_POLICY_STALL);
533         }
534
535         iosf_mbi_punit_acquire();
536         intel_uncore_forcewake_reset(uncore);
537         if (restore_forcewake) {
538                 spin_lock_irq(&uncore->lock);
539                 uncore->funcs.force_wake_get(uncore, restore_forcewake);
540
541                 if (intel_uncore_has_fifo(uncore))
542                         uncore->fifo_count = fifo_free_entries(uncore);
543                 spin_unlock_irq(&uncore->lock);
544         }
545         iosf_mbi_punit_release();
546 }
547
548 void intel_uncore_suspend(struct intel_uncore *uncore)
549 {
550         if (!intel_uncore_has_forcewake(uncore))
551                 return;
552
553         iosf_mbi_punit_acquire();
554         iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
555                 &uncore->pmic_bus_access_nb);
556         uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
557         iosf_mbi_punit_release();
558 }
559
560 void intel_uncore_resume_early(struct intel_uncore *uncore)
561 {
562         unsigned int restore_forcewake;
563
564         if (intel_uncore_unclaimed_mmio(uncore))
565                 DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
566
567         if (!intel_uncore_has_forcewake(uncore))
568                 return;
569
570         restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
571         forcewake_early_sanitize(uncore, restore_forcewake);
572
573         iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
574 }
575
576 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
577 {
578         if (!intel_uncore_has_forcewake(uncore))
579                 return;
580
581         iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
582 }
583
584 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
585                                          enum forcewake_domains fw_domains)
586 {
587         struct intel_uncore_forcewake_domain *domain;
588         unsigned int tmp;
589
590         fw_domains &= uncore->fw_domains;
591
592         for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
593                 if (domain->wake_count++) {
594                         fw_domains &= ~domain->mask;
595                         domain->active = true;
596                 }
597         }
598
599         if (fw_domains)
600                 uncore->funcs.force_wake_get(uncore, fw_domains);
601 }
602
603 /**
604  * intel_uncore_forcewake_get - grab forcewake domain references
605  * @uncore: the intel_uncore structure
606  * @fw_domains: forcewake domains to get reference on
607  *
608  * This function can be used to get GT's forcewake domain references.
609  * Normal register access will handle the forcewake domains automatically.
610  * However, if some sequence requires the GT to not power down particular
611  * forcewake domains, this function should be called at the beginning of the
612  * sequence, and the reference subsequently dropped by a symmetric call to
613  * intel_uncore_forcewake_put(). Usually the caller wants all the domains
614  * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
615  */
616 void intel_uncore_forcewake_get(struct intel_uncore *uncore,
617                                 enum forcewake_domains fw_domains)
618 {
619         unsigned long irqflags;
620
621         if (!uncore->funcs.force_wake_get)
622                 return;
623
624         assert_rpm_wakelock_held(uncore->rpm);
625
626         spin_lock_irqsave(&uncore->lock, irqflags);
627         __intel_uncore_forcewake_get(uncore, fw_domains);
628         spin_unlock_irqrestore(&uncore->lock, irqflags);
629 }
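
/*
 * Usage sketch (illustrative, not taken from a real caller): hold the
 * render powerwell across a pair of raw reads that must not race with a
 * power-down. intel_uncore_read_fw() is the forcewake-bypassing accessor
 * declared in intel_uncore.h:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	head = intel_uncore_read_fw(uncore, RING_HEAD(RENDER_RING_BASE));
 *	tail = intel_uncore_read_fw(uncore, RING_TAIL(RENDER_RING_BASE));
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */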
630
631 /**
632  * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
633  * @uncore: the intel_uncore structure
634  *
635  * This function is a wrapper around intel_uncore_forcewake_get() to acquire
636  * the GT powerwell and in the process disable our debugging for the
637  * duration of userspace's bypass.
638  */
639 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
640 {
641         spin_lock_irq(&uncore->lock);
642         if (!uncore->user_forcewake_count++) {
643                 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
644                 spin_lock(&uncore->debug->lock);
645                 mmio_debug_suspend(uncore->debug);
646                 spin_unlock(&uncore->debug->lock);
647         }
648         spin_unlock_irq(&uncore->lock);
649 }
650
651 /**
652  * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
653  * @uncore: the intel_uncore structure
654  *
655  * This function complements intel_uncore_forcewake_user_get() and releases
656  * the GT powerwell taken on behalf of the userspace bypass.
657  */
658 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
659 {
660         spin_lock_irq(&uncore->lock);
661         if (!--uncore->user_forcewake_count) {
662                 spin_lock(&uncore->debug->lock);
663                 mmio_debug_resume(uncore->debug);
664
665                 if (check_for_unclaimed_mmio(uncore))
666                         dev_info(uncore->i915->drm.dev,
667                                  "Invalid mmio detected during user access\n");
668                 spin_unlock(&uncore->debug->lock);
669
670                 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
671         }
672         spin_unlock_irq(&uncore->lock);
673 }
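
/*
 * Sketch of the intended pairing (illustrative): a debugfs-style bypass
 * wraps direct userspace mmio access, keeping the GT awake and mmio
 * debugging suspended only for the duration of the bypass:
 *
 *	intel_uncore_forcewake_user_get(uncore);
 *	... userspace performs raw register access ...
 *	intel_uncore_forcewake_user_put(uncore);
 */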
674
675 /**
676  * intel_uncore_forcewake_get__locked - grab forcewake domain references
677  * @uncore: the intel_uncore structure
678  * @fw_domains: forcewake domains to get reference on
679  *
680  * See intel_uncore_forcewake_get(). This variant places the onus
681  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
682  */
683 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
684                                         enum forcewake_domains fw_domains)
685 {
686         lockdep_assert_held(&uncore->lock);
687
688         if (!uncore->funcs.force_wake_get)
689                 return;
690
691         __intel_uncore_forcewake_get(uncore, fw_domains);
692 }
693
694 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
695                                          enum forcewake_domains fw_domains)
696 {
697         struct intel_uncore_forcewake_domain *domain;
698         unsigned int tmp;
699
700         fw_domains &= uncore->fw_domains;
701
702         for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
703                 GEM_BUG_ON(!domain->wake_count);
704
705                 if (--domain->wake_count) {
706                         domain->active = true;
707                         continue;
708                 }
709
710                 fw_domain_arm_timer(domain);
711         }
712 }
713
714 /**
715  * intel_uncore_forcewake_put - release a forcewake domain reference
716  * @uncore: the intel_uncore structure
717  * @fw_domains: forcewake domains to put references on
718  *
719  * This function drops the device-level forcewakes for specified
720  * domains obtained by intel_uncore_forcewake_get().
721  */
722 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
723                                 enum forcewake_domains fw_domains)
724 {
725         unsigned long irqflags;
726
727         if (!uncore->funcs.force_wake_put)
728                 return;
729
730         spin_lock_irqsave(&uncore->lock, irqflags);
731         __intel_uncore_forcewake_put(uncore, fw_domains);
732         spin_unlock_irqrestore(&uncore->lock, irqflags);
733 }
734
735 /**
736  * intel_uncore_forcewake_put__locked - release forcewake domain references
737  * @uncore: the intel_uncore structure
738  * @fw_domains: forcewake domains to put references on
739  *
740  * See intel_uncore_forcewake_put(). This variant places the onus
741  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
742  */
743 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
744                                         enum forcewake_domains fw_domains)
745 {
746         lockdep_assert_held(&uncore->lock);
747
748         if (!uncore->funcs.force_wake_put)
749                 return;
750
751         __intel_uncore_forcewake_put(uncore, fw_domains);
752 }
753
754 void assert_forcewakes_inactive(struct intel_uncore *uncore)
755 {
756         if (!uncore->funcs.force_wake_get)
757                 return;
758
759         WARN(uncore->fw_domains_active,
760              "Expected all fw_domains to be inactive, but %08x are still on\n",
761              uncore->fw_domains_active);
762 }
763
764 void assert_forcewakes_active(struct intel_uncore *uncore,
765                               enum forcewake_domains fw_domains)
766 {
767         struct intel_uncore_forcewake_domain *domain;
768         unsigned int tmp;
769
770         if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
771                 return;
772
773         if (!uncore->funcs.force_wake_get)
774                 return;
775
776         spin_lock_irq(&uncore->lock);
777
778         assert_rpm_wakelock_held(uncore->rpm);
779
780         fw_domains &= uncore->fw_domains;
781         WARN(fw_domains & ~uncore->fw_domains_active,
782              "Expected %08x fw_domains to be active, but %08x are off\n",
783              fw_domains, fw_domains & ~uncore->fw_domains_active);
784
785         /*
786          * Check that the caller has an explicit wakeref and we don't mistake
787          * it for the auto wakeref.
788          */
789         for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
790                 unsigned int actual = READ_ONCE(domain->wake_count);
791                 unsigned int expect = 1;
792
793                 if (uncore->fw_domains_timer & domain->mask)
794                         expect++; /* pending automatic release */
795
796                 if (WARN(actual < expect,
797                          "Expected domain %d to be held awake by caller, count=%d\n",
798                          domain->id, actual))
799                         break;
800         }
801
802         spin_unlock_irq(&uncore->lock);
803 }
804
805 /* We give fast paths for the really cool registers */
806 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
807
808 #define GEN11_NEEDS_FORCE_WAKE(reg) \
809         ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
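
/*
 * Example (a sketch of the split): gen11 moved the media engines to
 * 0x1c0000+, so GEN11_NEEDS_FORCE_WAKE(0x1c0000) is true while plain
 * NEEDS_FORCE_WAKE(0x1c0000) is not; offsets at 0x40000 and above that
 * fall outside the media window need no forcewake under either scheme.
 */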
810
811 #define __gen6_reg_read_fw_domains(uncore, offset) \
812 ({ \
813         enum forcewake_domains __fwd; \
814         if (NEEDS_FORCE_WAKE(offset)) \
815                 __fwd = FORCEWAKE_RENDER; \
816         else \
817                 __fwd = 0; \
818         __fwd; \
819 })
820
821 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
822 {
823         if (offset < entry->start)
824                 return -1;
825         else if (offset > entry->end)
826                 return 1;
827         else
828                 return 0;
829 }
830
831 /* Copied and "macroized" from lib/bsearch.c */
832 #define BSEARCH(key, base, num, cmp) ({                                 \
833         unsigned int start__ = 0, end__ = (num);                        \
834         typeof(base) result__ = NULL;                                   \
835         while (start__ < end__) {                                       \
836                 unsigned int mid__ = start__ + (end__ - start__) / 2;   \
837                 int ret__ = (cmp)((key), (base) + mid__);               \
838                 if (ret__ < 0) {                                        \
839                         end__ = mid__;                                  \
840                 } else if (ret__ > 0) {                                 \
841                         start__ = mid__ + 1;                            \
842                 } else {                                                \
843                         result__ = (base) + mid__;                      \
844                         break;                                          \
845                 }                                                       \
846         }                                                               \
847         result__;                                                       \
848 })
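
/*
 * Example use of BSEARCH (a sketch mirroring find_fw_domain() below); the
 * cmp callback must return <0, 0 or >0 for a key below, inside or above
 * the entry, exactly as fw_range_cmp() above does:
 *
 *	const struct intel_forcewake_range *entry =
 *		BSEARCH(offset, table, num_entries, fw_range_cmp);
 *	if (entry)
 *		... offset lies within entry->start..entry->end ...
 */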
849
850 static enum forcewake_domains
851 find_fw_domain(struct intel_uncore *uncore, u32 offset)
852 {
853         const struct intel_forcewake_range *entry;
854
855         entry = BSEARCH(offset,
856                         uncore->fw_domains_table,
857                         uncore->fw_domains_table_entries,
858                         fw_range_cmp);
859
860         if (!entry)
861                 return 0;
862
863         /*
864          * The list of FW domains depends on the SKU in gen11+ so we
865          * can't determine it statically. We use FORCEWAKE_ALL and
866          * translate it here to the list of available domains.
867          */
868         if (entry->domains == FORCEWAKE_ALL)
869                 return uncore->fw_domains;
870
871         WARN(entry->domains & ~uncore->fw_domains,
872              "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
873              entry->domains & ~uncore->fw_domains, offset);
874
875         return entry->domains;
876 }
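
/*
 * Worked example (assuming the gen9 table below): offset 0x2030, the
 * render ring's RING_TAIL, falls inside GEN_FW_RANGE(0x2000, 0x26ff,
 * FORCEWAKE_RENDER), so find_fw_domain() returns FORCEWAKE_RENDER. A
 * FORCEWAKE_ALL entry is instead expanded to uncore->fw_domains, since
 * the set of available domains on gen11+ depends on the SKU.
 */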
877
878 #define GEN_FW_RANGE(s, e, d) \
879         { .start = (s), .end = (e), .domains = (d) }
880
881 #define HAS_FWTABLE(dev_priv) \
882         (INTEL_GEN(dev_priv) >= 9 || \
883          IS_CHERRYVIEW(dev_priv) || \
884          IS_VALLEYVIEW(dev_priv))
885
886 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
887 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
888         GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
889         GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
890         GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
891         GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
892         GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
893         GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
894         GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
895 };
896
897 #define __fwtable_reg_read_fw_domains(uncore, offset) \
898 ({ \
899         enum forcewake_domains __fwd = 0; \
900         if (NEEDS_FORCE_WAKE((offset))) \
901                 __fwd = find_fw_domain(uncore, offset); \
902         __fwd; \
903 })
904
905 #define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
906 ({ \
907         enum forcewake_domains __fwd = 0; \
908         if (GEN11_NEEDS_FORCE_WAKE((offset))) \
909                 __fwd = find_fw_domain(uncore, offset); \
910         __fwd; \
911 })
912
913 /* *Must* be sorted by offset! See intel_shadow_table_check(). */
914 static const i915_reg_t gen8_shadowed_regs[] = {
915         RING_TAIL(RENDER_RING_BASE),    /* 0x2000 (base) */
916         GEN6_RPNSWREQ,                  /* 0xA008 */
917         GEN6_RC_VIDEO_FREQ,             /* 0xA00C */
918         RING_TAIL(GEN6_BSD_RING_BASE),  /* 0x12000 (base) */
919         RING_TAIL(VEBOX_RING_BASE),     /* 0x1a000 (base) */
920         RING_TAIL(BLT_RING_BASE),       /* 0x22000 (base) */
921         /* TODO: Other registers are not yet used */
922 };
923
924 static const i915_reg_t gen11_shadowed_regs[] = {
925         RING_TAIL(RENDER_RING_BASE),            /* 0x2000 (base) */
926         GEN6_RPNSWREQ,                          /* 0xA008 */
927         GEN6_RC_VIDEO_FREQ,                     /* 0xA00C */
928         RING_TAIL(BLT_RING_BASE),               /* 0x22000 (base) */
929         RING_TAIL(GEN11_BSD_RING_BASE),         /* 0x1C0000 (base) */
930         RING_TAIL(GEN11_BSD2_RING_BASE),        /* 0x1C4000 (base) */
931         RING_TAIL(GEN11_VEBOX_RING_BASE),       /* 0x1C8000 (base) */
932         RING_TAIL(GEN11_BSD3_RING_BASE),        /* 0x1D0000 (base) */
933         RING_TAIL(GEN11_BSD4_RING_BASE),        /* 0x1D4000 (base) */
934         RING_TAIL(GEN11_VEBOX2_RING_BASE),      /* 0x1D8000 (base) */
935         /* TODO: Other registers are not yet used */
936 };
937
938 static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
939 {
940         u32 offset = i915_mmio_reg_offset(*reg);
941
942         if (key < offset)
943                 return -1;
944         else if (key > offset)
945                 return 1;
946         else
947                 return 0;
948 }
949
950 #define __is_genX_shadowed(x) \
951 static bool is_gen##x##_shadowed(u32 offset) \
952 { \
953         const i915_reg_t *regs = gen##x##_shadowed_regs; \
954         return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
955                        mmio_reg_cmp); \
956 }
957
958 __is_genX_shadowed(8)
959 __is_genX_shadowed(11)
960
961 static enum forcewake_domains
962 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
963 {
964         return FORCEWAKE_RENDER;
965 }
966
967 #define __gen8_reg_write_fw_domains(uncore, offset) \
968 ({ \
969         enum forcewake_domains __fwd; \
970         if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
971                 __fwd = FORCEWAKE_RENDER; \
972         else \
973                 __fwd = 0; \
974         __fwd; \
975 })
976
977 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
978 static const struct intel_forcewake_range __chv_fw_ranges[] = {
979         GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
980         GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
981         GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
982         GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
983         GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
984         GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
985         GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
986         GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
987         GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
988         GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
989         GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
990         GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
991         GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
992         GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
993         GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
994         GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
995 };
996
997 #define __fwtable_reg_write_fw_domains(uncore, offset) \
998 ({ \
999         enum forcewake_domains __fwd = 0; \
1000         if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
1001                 __fwd = find_fw_domain(uncore, offset); \
1002         __fwd; \
1003 })
1004
1005 #define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
1006 ({ \
1007         enum forcewake_domains __fwd = 0; \
1008         if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
1009                 __fwd = find_fw_domain(uncore, offset); \
1010         __fwd; \
1011 })
1012
1013 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1014 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1015         GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
1016         GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1017         GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1018         GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
1019         GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1020         GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
1021         GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1022         GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
1023         GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
1024         GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1025         GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
1026         GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1027         GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
1028         GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
1029         GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
1030         GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1031         GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
1032         GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1033         GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
1034         GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1035         GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
1036         GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1037         GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
1038         GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1039         GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
1040         GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1041         GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
1042         GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1043         GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
1044         GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1045         GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
1046         GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1047 };
1048
1049 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1050 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1051         GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
1052         GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1053         GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1054         GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
1055         GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1056         GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
1057         GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1058         GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
1059         GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1060         GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
1061         GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1062         GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
1063         GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1064         GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
1065         GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
1066         GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
1067         GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1068         GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
1069         GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1070         GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
1071         GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1072         GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
1073         GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1074         GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1075         GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
1076         GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
1077         GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
1078         GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1079         GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
1080         GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
1081 };
1082
1083 static void
1084 ilk_dummy_write(struct intel_uncore *uncore)
1085 {
1086         /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1087          * the chip from rc6 before touching it for real. MI_MODE is masked,
1088          * hence harmless to write 0 into. */
1089         __raw_uncore_write32(uncore, MI_MODE, 0);
1090 }
1091
1092 static void
1093 __unclaimed_reg_debug(struct intel_uncore *uncore,
1094                       const i915_reg_t reg,
1095                       const bool read,
1096                       const bool before)
1097 {
1098         if (WARN(check_for_unclaimed_mmio(uncore) && !before,
1099                  "Unclaimed %s register 0x%x\n",
1100                  read ? "read from" : "write to",
1101                  i915_mmio_reg_offset(reg)))
1102                 /* Only report the first N failures */
1103                 i915_modparams.mmio_debug--;
1104 }
1105
1106 static inline void
1107 unclaimed_reg_debug(struct intel_uncore *uncore,
1108                     const i915_reg_t reg,
1109                     const bool read,
1110                     const bool before)
1111 {
1112         if (likely(!i915_modparams.mmio_debug))
1113                 return;
1114
1115         /* interrupts are disabled and re-enabled around uncore->lock usage */
1116         lockdep_assert_held(&uncore->lock);
1117
1118         if (before)
1119                 spin_lock(&uncore->debug->lock);
1120
1121         __unclaimed_reg_debug(uncore, reg, read, before);
1122
1123         if (!before)
1124                 spin_unlock(&uncore->debug->lock);
1125 }
1126
1127 #define GEN2_READ_HEADER(x) \
1128         u##x val = 0; \
1129         assert_rpm_wakelock_held(uncore->rpm);
1130
1131 #define GEN2_READ_FOOTER \
1132         trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1133         return val
1134
1135 #define __gen2_read(x) \
1136 static u##x \
1137 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1138         GEN2_READ_HEADER(x); \
1139         val = __raw_uncore_read##x(uncore, reg); \
1140         GEN2_READ_FOOTER; \
1141 }
1142
1143 #define __gen5_read(x) \
1144 static u##x \
1145 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1146         GEN2_READ_HEADER(x); \
1147         ilk_dummy_write(uncore); \
1148         val = __raw_uncore_read##x(uncore, reg); \
1149         GEN2_READ_FOOTER; \
1150 }
1151
1152 __gen5_read(8)
1153 __gen5_read(16)
1154 __gen5_read(32)
1155 __gen5_read(64)
1156 __gen2_read(8)
1157 __gen2_read(16)
1158 __gen2_read(32)
1159 __gen2_read(64)
1160
1161 #undef __gen5_read
1162 #undef __gen2_read
1163
1164 #undef GEN2_READ_FOOTER
1165 #undef GEN2_READ_HEADER
1166
1167 #define GEN6_READ_HEADER(x) \
1168         u32 offset = i915_mmio_reg_offset(reg); \
1169         unsigned long irqflags; \
1170         u##x val = 0; \
1171         assert_rpm_wakelock_held(uncore->rpm); \
1172         spin_lock_irqsave(&uncore->lock, irqflags); \
1173         unclaimed_reg_debug(uncore, reg, true, true)
1174
1175 #define GEN6_READ_FOOTER \
1176         unclaimed_reg_debug(uncore, reg, true, false); \
1177         spin_unlock_irqrestore(&uncore->lock, irqflags); \
1178         trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1179         return val
1180
1181 static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1182                                         enum forcewake_domains fw_domains)
1183 {
1184         struct intel_uncore_forcewake_domain *domain;
1185         unsigned int tmp;
1186
1187         GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1188
1189         for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1190                 fw_domain_arm_timer(domain);
1191
1192         uncore->funcs.force_wake_get(uncore, fw_domains);
1193 }
1194
1195 static inline void __force_wake_auto(struct intel_uncore *uncore,
1196                                      enum forcewake_domains fw_domains)
1197 {
1198         GEM_BUG_ON(!fw_domains);
1199
1200         /* Turn on all requested but inactive supported forcewake domains. */
1201         fw_domains &= uncore->fw_domains;
1202         fw_domains &= ~uncore->fw_domains_active;
1203
1204         if (fw_domains)
1205                 ___force_wake_auto(uncore, fw_domains);
1206 }
1207
1208 #define __gen_read(func, x) \
1209 static u##x \
1210 func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1211         enum forcewake_domains fw_engine; \
1212         GEN6_READ_HEADER(x); \
1213         fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
1214         if (fw_engine) \
1215                 __force_wake_auto(uncore, fw_engine); \
1216         val = __raw_uncore_read##x(uncore, reg); \
1217         GEN6_READ_FOOTER; \
1218 }
1219
1220 #define __gen_reg_read_funcs(func) \
1221 static enum forcewake_domains \
1222 func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1223         return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1224 } \
1225 \
1226 __gen_read(func, 8) \
1227 __gen_read(func, 16) \
1228 __gen_read(func, 32) \
1229 __gen_read(func, 64)
1230
1231 __gen_reg_read_funcs(gen11_fwtable);
1232 __gen_reg_read_funcs(fwtable);
1233 __gen_reg_read_funcs(gen6);
1234
1235 #undef __gen_reg_read_funcs
1236 #undef GEN6_READ_FOOTER
1237 #undef GEN6_READ_HEADER
1238
1239 #define GEN2_WRITE_HEADER \
1240         trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1241         assert_rpm_wakelock_held(uncore->rpm); \
1242
1243 #define GEN2_WRITE_FOOTER
1244
1245 #define __gen2_write(x) \
1246 static void \
1247 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1248         GEN2_WRITE_HEADER; \
1249         __raw_uncore_write##x(uncore, reg, val); \
1250         GEN2_WRITE_FOOTER; \
1251 }
1252
1253 #define __gen5_write(x) \
1254 static void \
1255 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1256         GEN2_WRITE_HEADER; \
1257         ilk_dummy_write(uncore); \
1258         __raw_uncore_write##x(uncore, reg, val); \
1259         GEN2_WRITE_FOOTER; \
1260 }
1261
1262 __gen5_write(8)
1263 __gen5_write(16)
1264 __gen5_write(32)
1265 __gen2_write(8)
1266 __gen2_write(16)
1267 __gen2_write(32)
1268
1269 #undef __gen5_write
1270 #undef __gen2_write
1271
1272 #undef GEN2_WRITE_FOOTER
1273 #undef GEN2_WRITE_HEADER
1274
1275 #define GEN6_WRITE_HEADER \
1276         u32 offset = i915_mmio_reg_offset(reg); \
1277         unsigned long irqflags; \
1278         trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1279         assert_rpm_wakelock_held(uncore->rpm); \
1280         spin_lock_irqsave(&uncore->lock, irqflags); \
1281         unclaimed_reg_debug(uncore, reg, false, true)
1282
1283 #define GEN6_WRITE_FOOTER \
1284         unclaimed_reg_debug(uncore, reg, false, false); \
1285         spin_unlock_irqrestore(&uncore->lock, irqflags)
1286
1287 #define __gen6_write(x) \
1288 static void \
1289 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1290         GEN6_WRITE_HEADER; \
1291         if (NEEDS_FORCE_WAKE(offset)) \
1292                 __gen6_gt_wait_for_fifo(uncore); \
1293         __raw_uncore_write##x(uncore, reg, val); \
1294         GEN6_WRITE_FOOTER; \
1295 }
1296 __gen6_write(8)
1297 __gen6_write(16)
1298 __gen6_write(32)
1299
1300 #define __gen_write(func, x) \
1301 static void \
1302 func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1303         enum forcewake_domains fw_engine; \
1304         GEN6_WRITE_HEADER; \
1305         fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
1306         if (fw_engine) \
1307                 __force_wake_auto(uncore, fw_engine); \
1308         __raw_uncore_write##x(uncore, reg, val); \
1309         GEN6_WRITE_FOOTER; \
1310 }
1311
1312 #define __gen_reg_write_funcs(func) \
1313 static enum forcewake_domains \
1314 func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1315         return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1316 } \
1317 \
1318 __gen_write(func, 8) \
1319 __gen_write(func, 16) \
1320 __gen_write(func, 32)
1321
1322 __gen_reg_write_funcs(gen11_fwtable);
1323 __gen_reg_write_funcs(fwtable);
1324 __gen_reg_write_funcs(gen8);
1325
1326 #undef __gen_reg_write_funcs
1327 #undef GEN6_WRITE_FOOTER
1328 #undef GEN6_WRITE_HEADER
1329
1330 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
1331 do { \
1332         (uncore)->funcs.mmio_writeb = x##_write8; \
1333         (uncore)->funcs.mmio_writew = x##_write16; \
1334         (uncore)->funcs.mmio_writel = x##_write32; \
1335 } while (0)
1336
1337 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
1338 do { \
1339         (uncore)->funcs.mmio_readb = x##_read8; \
1340         (uncore)->funcs.mmio_readw = x##_read16; \
1341         (uncore)->funcs.mmio_readl = x##_read32; \
1342         (uncore)->funcs.mmio_readq = x##_read64; \
1343 } while (0)
1344
1345 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
1346 do { \
1347         ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
1348         (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
1349 } while (0)
1350
1351 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
1352 do { \
1353         ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
1354         (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
1355 } while (0)
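
/*
 * Illustrative wiring (a sketch of how uncore init is expected to use
 * these macros later in this file): the x## token is the function-family
 * prefix generated by __gen_reg_read_funcs/__gen_reg_write_funcs above,
 * e.g.:
 *
 *	ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
 *	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 */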
1356
1357 static int __fw_domain_init(struct intel_uncore *uncore,
1358                             enum forcewake_domain_id domain_id,
1359                             i915_reg_t reg_set,
1360                             i915_reg_t reg_ack)
1361 {
1362         struct intel_uncore_forcewake_domain *d;
1363
1364         GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1365         GEM_BUG_ON(uncore->fw_domain[domain_id]);
1366
1367         if (i915_inject_probe_failure(uncore->i915))
1368                 return -ENOMEM;
1369
1370         d = kzalloc(sizeof(*d), GFP_KERNEL);
1371         if (!d)
1372                 return -ENOMEM;
1373
1374         WARN_ON(!i915_mmio_reg_valid(reg_set));
1375         WARN_ON(!i915_mmio_reg_valid(reg_ack));
1376
1377         d->uncore = uncore;
1378         d->wake_count = 0;
1379         d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
1380         d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
1381
1382         d->id = domain_id;
1383
1384         BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1385         BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1386         BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1387         BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
1388         BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
1389         BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
1390         BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
1391         BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
1392         BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
1393
1394         d->mask = BIT(domain_id);
1395
1396         hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1397         d->timer.function = intel_uncore_fw_release_timer;
1398
1399         uncore->fw_domains |= BIT(domain_id);
1400
1401         fw_domain_reset(d);
1402
1403         uncore->fw_domain[domain_id] = d;
1404
1405         return 0;
1406 }
1407
1408 static void fw_domain_fini(struct intel_uncore *uncore,
1409                            enum forcewake_domain_id domain_id)
1410 {
1411         struct intel_uncore_forcewake_domain *d;
1412
1413         GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1414
1415         d = fetch_and_zero(&uncore->fw_domain[domain_id]);
1416         if (!d)
1417                 return;
1418
1419         uncore->fw_domains &= ~BIT(domain_id);
1420         WARN_ON(d->wake_count);
1421         WARN_ON(hrtimer_cancel(&d->timer));
1422         kfree(d);
1423 }

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
        struct intel_uncore_forcewake_domain *d;
        int tmp;

        for_each_fw_domain(d, uncore, tmp)
                fw_domain_fini(uncore, d->id);
}

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;
        int ret = 0;

        GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
        (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

        if (INTEL_GEN(i915) >= 11) {
                int i;

                uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
                uncore->funcs.force_wake_put = fw_domains_put;
                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);

                for (i = 0; i < I915_MAX_VCS; i++) {
                        if (!HAS_ENGINE(i915, _VCS(i)))
                                continue;

                        fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
                                       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
                                       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
                }
                for (i = 0; i < I915_MAX_VECS; i++) {
                        if (!HAS_ENGINE(i915, _VECS(i)))
                                continue;

                        fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
                                       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
                                       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
                }
        } else if (IS_GEN_RANGE(i915, 9, 10)) {
                uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
                uncore->funcs.force_wake_put = fw_domains_put;
                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
        } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
                uncore->funcs.force_wake_get = fw_domains_get;
                uncore->funcs.force_wake_put = fw_domains_put;
                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                uncore->funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                uncore->funcs.force_wake_put = fw_domains_put;
                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(i915)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here: if the BIOS hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero, which the test below
                 * (correctly) interprets as MT forcewake being
                 * disabled.
                 */
                uncore->funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                uncore->funcs.force_wake_put = fw_domains_put;

                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case MT access is
                 * not working. At this stage we don't know which flavour this
                 * IVB is, so it is better to also reset the gen6 fw registers
                 * before the ecobus check.
                 */

                __raw_uncore_write32(uncore, FORCEWAKE, 0);
                __raw_posting_read(uncore, ECOBUS);

                ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
                if (ret)
                        goto out;

                spin_lock_irq(&uncore->lock);
                fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
                ecobus = __raw_uncore_read32(uncore, ECOBUS);
                fw_domains_put(uncore, FORCEWAKE_RENDER);
                spin_unlock_irq(&uncore->lock);

                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
                        fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
        } else if (IS_GEN(i915, 6)) {
                uncore->funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                uncore->funcs.force_wake_put = fw_domains_put;
                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }

#undef fw_domain_init

        /* All future platforms are expected to require complex power gating */
        WARN_ON(!ret && uncore->fw_domains == 0);

out:
        if (ret)
                intel_uncore_fw_domains_fini(uncore);

        return ret;
}

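/*
 * Note on the fw_domain_init() wrapper above: the GNU "?:" extension makes
 * each invocation a no-op once ret is non-zero, so the first failing
 * __fw_domain_init() short-circuits the rest of the table. A minimal sketch
 * of what two consecutive calls expand to (illustrative only; s0/a0 and
 * s1/a1 are placeholder register arguments):
 *
 *	(ret ?: (ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, s0, a0)));
 *	(ret ?: (ret = __fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, s1, a1)));
 *
 * The second init runs only if the first one returned 0.
 */
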
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
        (uncore)->fw_domains_table = \
                        (struct intel_forcewake_range *)(d); \
        (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
                                         unsigned long action, void *data)
{
        struct intel_uncore *uncore = container_of(nb,
                        struct intel_uncore, pmic_bus_access_nb);

        switch (action) {
        case MBI_PMIC_BUS_ACCESS_BEGIN:
                /*
                 * Forcewake all now to make sure that we don't need to do a
                 * forcewake later. On systems where this notifier gets
                 * called, a later forcewake requires the punit to access the
                 * shared pmic i2c bus, which will be busy after this
                 * notification, leading to:
                 * "render: timed out waiting for forcewake ack request."
                 * errors.
                 *
                 * The notifier is unregistered during intel_runtime_suspend(),
                 * so it's ok to access the HW here without holding an RPM
                 * wake reference -> disable wakeref asserts for the duration
                 * of the access.
                 */
                disable_rpm_wakeref_asserts(uncore->rpm);
                intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
                enable_rpm_wakeref_asserts(uncore->rpm);
                break;
        case MBI_PMIC_BUS_ACCESS_END:
                intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
                break;
        }

        return NOTIFY_OK;
}

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;
        struct pci_dev *pdev = i915->drm.pdev;
        int mmio_bar;
        int mmio_size;

        mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
        /*
         * Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
         * in the same BAR, so we want to restrict this ioremap from
         * clobbering the GTT, which we want to map with ioremap_wc instead.
         * Fortunately, the register BAR remains the same size for all the
         * earlier generations up to Ironlake.
         */
        if (INTEL_GEN(i915) < 5)
                mmio_size = 512 * 1024;
        else
                mmio_size = 2 * 1024 * 1024;
        uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
        if (uncore->regs == NULL) {
                DRM_ERROR("failed to map registers\n");

                return -EIO;
        }

        return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
        struct pci_dev *pdev = uncore->i915->drm.pdev;

        pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
                             struct drm_i915_private *i915)
{
        spin_lock_init(&uncore->lock);
        uncore->i915 = i915;
        uncore->rpm = &i915->runtime_pm;
        uncore->debug = &i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
        GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

        if (IS_GEN(uncore->i915, 5)) {
                ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
                ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
        } else {
                ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
                ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
        }
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;
        int ret;

        GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

        ret = intel_uncore_fw_domains_init(uncore);
        if (ret)
                return ret;
        forcewake_early_sanitize(uncore, 0);

        if (IS_GEN_RANGE(i915, 6, 7)) {
                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

                if (IS_VALLEYVIEW(i915)) {
                        ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
                        ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
                }
        } else if (IS_GEN(i915, 8)) {
                if (IS_CHERRYVIEW(i915)) {
                        ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
                        ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
                        ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
                        ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
                }
        } else if (IS_GEN_RANGE(i915, 9, 10)) {
                ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
                ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
                ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
        } else {
                ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
                ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
        }

        uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
        iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

        return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;
        int ret;

        ret = uncore_mmio_setup(uncore);
        if (ret)
                return ret;

        if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
                uncore->flags |= UNCORE_HAS_FORCEWAKE;

        if (!intel_uncore_has_forcewake(uncore)) {
                uncore_raw_init(uncore);
        } else {
                ret = uncore_forcewake_init(uncore);
                if (ret)
                        goto out_mmio_cleanup;
        }

        /* Make sure fw funcs are set if and only if we have fw */
        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

        if (HAS_FPGA_DBG_UNCLAIMED(i915))
                uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

        if (IS_GEN_RANGE(i915, 6, 7))
                uncore->flags |= UNCORE_HAS_FIFO;

        /* clear out unclaimed reg detection bit */
        if (intel_uncore_unclaimed_mmio(uncore))
                DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

        return 0;

out_mmio_cleanup:
        uncore_mmio_cleanup(uncore);

        return ret;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;
        enum forcewake_domains fw_domains = uncore->fw_domains;
        enum forcewake_domain_id domain_id;
        int i;

        if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
                return;

        for (i = 0; i < I915_MAX_VCS; i++) {
                domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

                if (HAS_ENGINE(i915, _VCS(i)))
                        continue;

                if (fw_domains & BIT(domain_id))
                        fw_domain_fini(uncore, domain_id);
        }

        for (i = 0; i < I915_MAX_VECS; i++) {
                domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

                if (HAS_ENGINE(i915, _VECS(i)))
                        continue;

                if (fw_domains & BIT(domain_id))
                        fw_domain_fini(uncore, domain_id);
        }
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
        if (intel_uncore_has_forcewake(uncore)) {
                iosf_mbi_punit_acquire();
                iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
                        &uncore->pmic_bus_access_nb);
                intel_uncore_forcewake_reset(uncore);
                intel_uncore_fw_domains_fini(uncore);
                iosf_mbi_punit_release();
        }

        uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
        i915_reg_t offset_ldw;
        i915_reg_t offset_udw;
        u16 gen_mask;
        u8 size;
} reg_read_whitelist[] = { {
        .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
        .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
        .gen_mask = INTEL_GEN_MASK(4, 12),
        .size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct intel_uncore *uncore = &i915->uncore;
        struct drm_i915_reg_read *reg = data;
        struct reg_whitelist const *entry;
        intel_wakeref_t wakeref;
        unsigned int flags;
        int remain;
        int ret = 0;

        entry = reg_read_whitelist;
        remain = ARRAY_SIZE(reg_read_whitelist);
        while (remain) {
                u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

                GEM_BUG_ON(!is_power_of_2(entry->size));
                GEM_BUG_ON(entry->size > 8);
                GEM_BUG_ON(entry_offset & (entry->size - 1));

                if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
                    entry_offset == (reg->offset & -entry->size))
                        break;
                entry++;
                remain--;
        }

        if (!remain)
                return -EINVAL;

        flags = reg->offset & (entry->size - 1);

        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
                        reg->val = intel_uncore_read64_2x32(uncore,
                                                            entry->offset_ldw,
                                                            entry->offset_udw);
                else if (entry->size == 8 && flags == 0)
                        reg->val = intel_uncore_read64(uncore,
                                                       entry->offset_ldw);
                else if (entry->size == 4 && flags == 0)
                        reg->val = intel_uncore_read(uncore, entry->offset_ldw);
                else if (entry->size == 2 && flags == 0)
                        reg->val = intel_uncore_read16(uncore,
                                                       entry->offset_ldw);
                else if (entry->size == 1 && flags == 0)
                        reg->val = intel_uncore_read8(uncore,
                                                      entry->offset_ldw);
                else
                        ret = -EINVAL;
        }

        return ret;
}
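
/*
 * Userspace usage sketch (illustrative only, assumes libdrm and an open
 * i915 fd): reading the 64-bit render RING_TIMESTAMP through the whitelist
 * above. DRM_IOCTL_I915_REG_READ and struct drm_i915_reg_read come from the
 * uapi i915_drm.h; the low bits of .offset carry the flags checked above
 * (here I915_REG_READ_8B_WA). 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE).
 *
 *	struct drm_i915_reg_read rr = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		printf("render timestamp: %llu\n", (unsigned long long)rr.val);
 */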

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
                                 i915_reg_t reg,
                                 u32 mask,
                                 u32 value,
                                 unsigned int fast_timeout_us,
                                 unsigned int slow_timeout_ms,
                                 u32 *out_value)
{
        u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
        int ret;

        /* Catch any overuse of this function */
        might_sleep_if(slow_timeout_ms);
        GEM_BUG_ON(fast_timeout_us > 20000);

        ret = -ETIMEDOUT;
        if (fast_timeout_us && fast_timeout_us <= 20000)
                ret = _wait_for_atomic(done, fast_timeout_us, 0);
        if (ret && slow_timeout_ms)
                ret = wait_for(done, slow_timeout_ms);

        if (out_value)
                *out_value = reg_value;

        return ret;
#undef done
}
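
/*
 * Usage sketch (illustrative only; SOME_STATUS_REG, READY_BIT and the
 * timeouts are placeholders): a caller that already holds forcewake polling
 * for a ready bit, first in a tight 500us loop, then sleeping for up to
 * 10ms.
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	err = __intel_wait_for_register_fw(uncore, SOME_STATUS_REG,
 *					   READY_BIT, READY_BIT,
 *					   500, 10, NULL);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */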

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
                              i915_reg_t reg,
                              u32 mask,
                              u32 value,
                              unsigned int fast_timeout_us,
                              unsigned int slow_timeout_ms,
                              u32 *out_value)
{
        unsigned fw =
                intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
        u32 reg_value;
        int ret;

        might_sleep_if(slow_timeout_ms);

        spin_lock_irq(&uncore->lock);
        intel_uncore_forcewake_get__locked(uncore, fw);

        ret = __intel_wait_for_register_fw(uncore,
                                           reg, mask, value,
                                           fast_timeout_us, 0, &reg_value);

        intel_uncore_forcewake_put__locked(uncore, fw);
        spin_unlock_irq(&uncore->lock);

        if (ret && slow_timeout_ms)
                ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
                                                                       reg),
                                 (reg_value & mask) == value,
                                 slow_timeout_ms * 1000, 10, 1000);

        /* just trace the final value */
        trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

        if (out_value)
                *out_value = reg_value;

        return ret;
}
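
/*
 * Usage sketch (illustrative only; SOME_STATUS_REG and BUSY_BIT are
 * placeholders): waiting for an engine to report idle without managing
 * forcewake or the uncore lock by hand, with a 2us tight wait followed by
 * a 50ms sleeping wait.
 *
 *	err = __intel_wait_for_register(uncore, SOME_STATUS_REG,
 *					BUSY_BIT, 0,
 *					2, 50, NULL);
 *	if (err)
 *		DRM_ERROR("wait for idle timed out\n");
 */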

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
        bool ret;

        spin_lock_irq(&uncore->debug->lock);
        ret = check_for_unclaimed_mmio(uncore);
        spin_unlock_irq(&uncore->debug->lock);

        return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
        bool ret = false;

        spin_lock_irq(&uncore->debug->lock);

        if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
                goto out;

        if (unlikely(check_for_unclaimed_mmio(uncore))) {
                if (!i915_modparams.mmio_debug) {
                        DRM_DEBUG("Unclaimed register detected, "
                                  "enabling oneshot unclaimed register reporting. "
                                  "Please use i915.mmio_debug=N for more information.\n");
                        i915_modparams.mmio_debug++;
                }
                uncore->debug->unclaimed_mmio_check--;
                ret = true;
        }

out:
        spin_unlock_irq(&uncore->debug->lock);

        return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *                                  a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the set of forcewake domains that must be taken (for example with
 * intel_uncore_forcewake_get()) before the specified register can be accessed
 * in the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do their own FIFO management or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
                               i915_reg_t reg, unsigned int op)
{
        enum forcewake_domains fw_domains = 0;

        WARN_ON(!op);

        if (!intel_uncore_has_forcewake(uncore))
                return 0;

        if (op & FW_REG_READ)
                fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

        if (op & FW_REG_WRITE)
                fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

        WARN_ON(fw_domains & ~uncore->fw_domains);

        return fw_domains;
}

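/*
 * Usage sketch (illustrative only; SOME_REG and SOME_BIT are placeholders):
 * batching several raw accesses under a single forcewake grab computed from
 * the register itself.
 *
 *	enum forcewake_domains fw;
 *	u32 val;
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, SOME_REG,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, SOME_REG);
 *	intel_uncore_write_fw(uncore, SOME_REG, val | SOME_BIT);
 *	intel_uncore_forcewake_put(uncore, fw);
 */
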
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif