/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}
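
/**
 * __intel_wakeref_get_first - bring the device up for the first reference
 * @wf: the wakeref
 *
 * The slow path of the wakeref get: under @wf->mutex, acquire the
 * runtime-pm wakeref and invoke the owner's ops->get() before marking the
 * count as active. Returns 0 on success, or the error from ops->get()
 * with everything unwound.
 */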
int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}
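
/*
 * Drop one reference with wf->mutex held. If it is the last, invoke the
 * owner's ops->put(); on success release the runtime-pm wakeref and wake
 * any intel_wakeref_wait_for_idle() sleepers. The mutex is dropped before
 * returning.
 */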
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	if (!atomic_dec_and_test(&wf->count))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
}
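
/**
 * __intel_wakeref_put_last - release the final reference
 * @wf: the wakeref
 *
 * The slow path of the wakeref put: if we cannot sleep here, either
 * because the owner demands it (INTEL_WAKEREF_PUT_ASYNC) or because the
 * mutex is contended, punt the release to a worker; otherwise drop the
 * reference directly.
 */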
void __intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
	    !mutex_trylock(&wf->mutex)) {
		schedule_work(&wf->work);
		return;
	}

	____intel_wakeref_put_last(wf);
}
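
/*
 * Process-context worker for deferred puts: drop the reference we were
 * handed and, only if it was the last one, take the mutex and complete
 * the release.
 */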
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}
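
/**
 * __intel_wakeref_init - initialise a wakeref
 * @wf: the wakeref to set up
 * @rpm: the runtime-pm device backing @wf
 * @ops: owner callbacks run on first get and last put
 * @key: lockdep class key for the embedded mutex
 *
 * Callers typically use the intel_wakeref_init() macro in
 * intel_wakeref.h, which supplies a static lockdep key.
 */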
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref", key);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_WORK(&wf->work, __intel_wakeref_put_work);
}
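
/**
 * intel_wakeref_wait_for_idle - wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Sleep until the last reference has been released and the hardware
 * wakeref dropped. Returns 0 on success, or -ERESTARTSYS if interrupted
 * by a fatal signal.
 */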
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	return wait_var_event_killable(&wf->wakeref,
				       !intel_wakeref_is_active(wf));
}
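
/*
 * Timer callback for the auto wakeref: once the final timed reference
 * has expired, release the runtime-pm wakeref taken on its behalf.
 */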
static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}
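
/**
 * intel_wakeref_auto_init - initialise an auto-expiring wakeref
 * @wf: the wakeref to set up
 * @rpm: the runtime-pm device backing @wf
 */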
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}
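
/**
 * intel_wakeref_auto - keep the device awake for at least @timeout
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies; 0 cancels any pending timer
 *
 * Pin an already active runtime-pm wakeref for @timeout jiffies from now,
 * taking a temporary reference that the timer callback releases on
 * expiry.
 */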
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is only to extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}
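
/**
 * intel_wakeref_auto_fini - tear down an auto-expiring wakeref
 * @wf: the wakeref
 *
 * Flushes any pending timeout and asserts that the hardware reference
 * has been released.
 */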
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
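
/*
 * Usage sketch (a hypothetical "engine" owner; the ops signatures follow
 * struct intel_wakeref_ops, and intel_wakeref_init()/intel_wakeref_get()/
 * intel_wakeref_put() are the intel_wakeref.h entry points):
 *
 *	static int engine_wakeref_get(struct intel_wakeref *wf)
 *	{
 *		return 0;
 *	}
 *
 *	static int engine_wakeref_put(struct intel_wakeref *wf)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct intel_wakeref_ops engine_wakeref_ops = {
 *		.get = engine_wakeref_get,
 *		.put = engine_wakeref_put,
 *	};
 *
 *	intel_wakeref_init(&engine->wakeref, rpm, &engine_wakeref_ops);
 */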