Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / gpu / drm / i915 / selftests / i915_gem.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include <linux/random.h>
8
9 #include "gem/selftests/igt_gem_utils.h"
10 #include "gem/selftests/mock_context.h"
11
12 #include "i915_selftest.h"
13
14 #include "igt_flush_test.h"
15 #include "mock_drm.h"
16
17 static int switch_to_context(struct drm_i915_private *i915,
18                              struct i915_gem_context *ctx)
19 {
20         struct intel_engine_cs *engine;
21         enum intel_engine_id id;
22
23         for_each_engine(engine, i915, id) {
24                 struct i915_request *rq;
25
26                 rq = igt_request_alloc(ctx, engine);
27                 if (IS_ERR(rq))
28                         return PTR_ERR(rq);
29
30                 i915_request_add(rq);
31         }
32
33         return 0;
34 }
35
36 static void trash_stolen(struct drm_i915_private *i915)
37 {
38         struct i915_ggtt *ggtt = &i915->ggtt;
39         const u64 slot = ggtt->error_capture.start;
40         const resource_size_t size = resource_size(&i915->dsm);
41         unsigned long page;
42         u32 prng = 0x12345678;
43
44         for (page = 0; page < size; page += PAGE_SIZE) {
45                 const dma_addr_t dma = i915->dsm.start + page;
46                 u32 __iomem *s;
47                 int x;
48
49                 ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
50
51                 s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
52                 for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
53                         prng = next_pseudo_random32(prng);
54                         iowrite32(prng, &s[x]);
55                 }
56                 io_mapping_unmap_atomic(s);
57         }
58
59         ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
60 }
61
62 static void simulate_hibernate(struct drm_i915_private *i915)
63 {
64         intel_wakeref_t wakeref;
65
66         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
67
68         /*
69          * As a final sting in the tail, invalidate stolen. Under a real S4,
70          * stolen is lost and needs to be refilled on resume. However, under
71          * CI we merely do S4-device testing (as full S4 is too unreliable
72          * for automated testing across a cluster), so to simulate the effect
73          * of stolen being trashed across S4, we trash it ourselves.
74          */
75         trash_stolen(i915);
76
77         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
78 }
79
/*
 * Mirror the ->prepare() phase of system suspend: flush outstanding
 * work and park the GPU. Always succeeds.
 */
static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}
86
87 static void pm_suspend(struct drm_i915_private *i915)
88 {
89         intel_wakeref_t wakeref;
90
91         with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
92                 i915_gem_suspend_gtt_mappings(i915);
93                 i915_gem_suspend_late(i915);
94         }
95 }
96
97 static void pm_hibernate(struct drm_i915_private *i915)
98 {
99         intel_wakeref_t wakeref;
100
101         with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
102                 i915_gem_suspend_gtt_mappings(i915);
103
104                 i915_gem_freeze(i915);
105                 i915_gem_freeze_late(i915);
106         }
107 }
108
109 static void pm_resume(struct drm_i915_private *i915)
110 {
111         intel_wakeref_t wakeref;
112
113         /*
114          * Both suspend and hibernate follow the same wakeup path and assume
115          * that runtime-pm just works.
116          */
117         with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
118                 intel_gt_sanitize(i915, false);
119                 i915_gem_sanitize(i915);
120
121                 mutex_lock(&i915->drm.struct_mutex);
122                 i915_gem_restore_gtt_mappings(i915);
123                 i915_gem_restore_fences(i915);
124                 mutex_unlock(&i915->drm.struct_mutex);
125
126                 i915_gem_resume(i915);
127         }
128 }
129
130 static int igt_gem_suspend(void *arg)
131 {
132         struct drm_i915_private *i915 = arg;
133         struct i915_gem_context *ctx;
134         struct drm_file *file;
135         int err;
136
137         file = mock_file(i915);
138         if (IS_ERR(file))
139                 return PTR_ERR(file);
140
141         err = -ENOMEM;
142         mutex_lock(&i915->drm.struct_mutex);
143         ctx = live_context(i915, file);
144         if (!IS_ERR(ctx))
145                 err = switch_to_context(i915, ctx);
146         mutex_unlock(&i915->drm.struct_mutex);
147         if (err)
148                 goto out;
149
150         err = pm_prepare(i915);
151         if (err)
152                 goto out;
153
154         pm_suspend(i915);
155
156         /* Here be dragons! Note that with S3RST any S3 may become S4! */
157         simulate_hibernate(i915);
158
159         pm_resume(i915);
160
161         mutex_lock(&i915->drm.struct_mutex);
162         err = switch_to_context(i915, ctx);
163         if (igt_flush_test(i915, I915_WAIT_LOCKED))
164                 err = -EIO;
165         mutex_unlock(&i915->drm.struct_mutex);
166 out:
167         mock_file_free(i915, file);
168         return err;
169 }
170
171 static int igt_gem_hibernate(void *arg)
172 {
173         struct drm_i915_private *i915 = arg;
174         struct i915_gem_context *ctx;
175         struct drm_file *file;
176         int err;
177
178         file = mock_file(i915);
179         if (IS_ERR(file))
180                 return PTR_ERR(file);
181
182         err = -ENOMEM;
183         mutex_lock(&i915->drm.struct_mutex);
184         ctx = live_context(i915, file);
185         if (!IS_ERR(ctx))
186                 err = switch_to_context(i915, ctx);
187         mutex_unlock(&i915->drm.struct_mutex);
188         if (err)
189                 goto out;
190
191         err = pm_prepare(i915);
192         if (err)
193                 goto out;
194
195         pm_hibernate(i915);
196
197         /* Here be dragons! */
198         simulate_hibernate(i915);
199
200         pm_resume(i915);
201
202         mutex_lock(&i915->drm.struct_mutex);
203         err = switch_to_context(i915, ctx);
204         if (igt_flush_test(i915, I915_WAIT_LOCKED))
205                 err = -EIO;
206         mutex_unlock(&i915->drm.struct_mutex);
207 out:
208         mock_file_free(i915, file);
209         return err;
210 }
211
/*
 * Entry point for the GEM power-management live selftests; runs each
 * SUBTEST in order against real hardware via i915_subtests().
 */
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
	};

	/* A terminally wedged GPU cannot execute requests; skip cleanly. */
	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}