/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 */
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"
/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side, where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
 */
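
/*
 * Illustrative sketch only (not part of the driver): the canonical pattern
 * for consumers of this interface is to bracket hardware access with a
 * wakeref. intel_runtime_pm_get()/intel_runtime_pm_put() are the real entry
 * points defined below; the surrounding function is hypothetical.
 */
#if 0
static void example_hw_access(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	/* ... the device is guaranteed to be powered up here ... */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
#endif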
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	/* Skip the first frame (this function itself) when capturing. */
	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
static void __print_depot_stack(depot_stack_handle_t stack,
				char *buf, int sz, int indent)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
}
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!rpm->available)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}
/* Sort handles numerically so identical stacks end up adjacent. */
static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	/* Group identical stacks so each is reported only once, with a count. */
	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}
static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}
static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}
static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	/*
	 * Snapshot the tracking state under the lock, growing our copy of
	 * the owners array and retrying if more wakerefs appeared meanwhile.
	 */
	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}
#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif
static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}
static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}
/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}
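
/*
 * Illustrative sketch only: a raw reference pairs with
 * intel_runtime_pm_put_raw() and is not counted by the wakelock asserts.
 * The caller context here is hypothetical.
 */
#if 0
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_raw(rpm);
	/* ... device is powered up; asserts ignore this reference ... */
	intel_runtime_pm_put_raw(rpm, wakeref);
#endif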
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to access
 * the HW if intel_runtime_pm_get_if_in_use() reports failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * If runtime PM is disabled by the RPM core and we get an
		 * -EINVAL return value, we are not supposed to call this
		 * function, since the power state is undefined. This
		 * currently applies to the late/early system suspend/resume
		 * handlers.
		 */
		if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
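
/*
 * Illustrative sketch only: since the returned cookie evaluates as false on
 * failure, conditional acquisition is typically written as below (caller
 * context hypothetical).
 */
#if 0
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_if_in_use(rpm);
	if (wakeref) {
		/* HW access is only legal inside this block */
		intel_runtime_pm_put(rpm, wakeref);
	}
#endif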
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
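
/*
 * Illustrative sketch only: a noresume reference may only be taken while the
 * device is already held awake, e.g. to hand a wakeref off to deferred work
 * during system suspend. The work structure and workqueue are hypothetical.
 */
#if 0
	/* caller already holds a wakeref at this point */
	work->wakeref = intel_runtime_pm_get_noresume(rpm);
	queue_work(wq, &work->work); /* the worker calls intel_runtime_pm_put() */
#endif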
static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}
/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	WARN(pm_runtime_get_sync(kdev) < 0,
	     "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	int count = atomic_read(&rpm->wakeref_count);

	WARN(count,
	     "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
	     intel_rpm_raw_wakeref_count(count),
	     intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
			container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);

	init_intel_runtime_pm_wakeref(rpm);
}