/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
18 struct i915_active base;
23 static void __live_get(struct live_active *active)
25 kref_get(&active->ref);
28 static void __live_free(struct live_active *active)
30 i915_active_fini(&active->base);
34 static void __live_release(struct kref *ref)
36 struct live_active *active = container_of(ref, typeof(*active), ref);
41 static void __live_put(struct live_active *active)
43 kref_put(&active->ref, __live_release);
46 static int __live_active(struct i915_active *base)
48 struct live_active *active = container_of(base, typeof(*active), base);
54 static void __live_retire(struct i915_active *base)
56 struct live_active *active = container_of(base, typeof(*active), base);
58 active->retired = true;
62 static struct live_active *__live_alloc(struct drm_i915_private *i915)
64 struct live_active *active;
66 active = kzalloc(sizeof(*active), GFP_KERNEL);
70 kref_init(&active->ref);
71 i915_active_init(i915, &active->base, __live_active, __live_retire);
76 static struct live_active *
77 __live_active_setup(struct drm_i915_private *i915)
79 struct intel_engine_cs *engine;
80 struct i915_sw_fence *submit;
81 struct live_active *active;
82 enum intel_engine_id id;
83 unsigned int count = 0;
86 active = __live_alloc(i915);
88 return ERR_PTR(-ENOMEM);
90 submit = heap_fence_create(GFP_KERNEL);
93 return ERR_PTR(-ENOMEM);
96 err = i915_active_acquire(&active->base);
100 for_each_engine(engine, i915, id) {
101 struct i915_request *rq;
103 rq = i915_request_create(engine->kernel_context);
109 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
113 err = i915_active_ref(&active->base, rq->timeline, rq);
114 i915_request_add(rq);
116 pr_err("Failed to track active ref!\n");
123 i915_active_release(&active->base);
124 if (active->retired && count) {
125 pr_err("i915_active retired before submission!\n");
128 if (atomic_read(&active->base.count) != count) {
129 pr_err("i915_active not tracking all requests, found %d, expected %d\n",
130 atomic_read(&active->base.count), count);
135 i915_sw_fence_commit(submit);
136 heap_fence_put(submit);
139 active = ERR_PTR(err);
145 static int live_active_wait(void *arg)
147 struct drm_i915_private *i915 = arg;
148 struct live_active *active;
149 intel_wakeref_t wakeref;
152 /* Check that we get a callback when requests retire upon waiting */
154 mutex_lock(&i915->drm.struct_mutex);
155 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
157 active = __live_active_setup(i915);
158 if (IS_ERR(active)) {
159 err = PTR_ERR(active);
163 i915_active_wait(&active->base);
164 if (!active->retired) {
165 pr_err("i915_active not retired after waiting!\n");
171 if (igt_flush_test(i915, I915_WAIT_LOCKED))
175 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
176 mutex_unlock(&i915->drm.struct_mutex);
181 static int live_active_retire(void *arg)
183 struct drm_i915_private *i915 = arg;
184 struct live_active *active;
185 intel_wakeref_t wakeref;
188 /* Check that we get a callback when requests are indirectly retired */
190 mutex_lock(&i915->drm.struct_mutex);
191 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
193 active = __live_active_setup(i915);
194 if (IS_ERR(active)) {
195 err = PTR_ERR(active);
199 /* waits for & retires all requests */
200 if (igt_flush_test(i915, I915_WAIT_LOCKED))
203 if (!active->retired) {
204 pr_err("i915_active not retired after flushing!\n");
211 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
212 mutex_unlock(&i915->drm.struct_mutex);
217 int i915_active_live_selftests(struct drm_i915_private *i915)
219 static const struct i915_subtest tests[] = {
220 SUBTEST(live_active_wait),
221 SUBTEST(live_active_retire),
224 if (intel_gt_is_wedged(&i915->gt))
227 return i915_subtests(tests, i915);