// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "mock_context.h"

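/*
 * Fill a randomly sized internal object with a random value using the
 * blitter, after first dirtying the CPU cache with a different value,
 * then read the pages back through the CPU domain and check that every
 * dword matches. Repeat until the selftest timeout expires.
 */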
static int igt_client_fill(void *arg)
{
        struct intel_context *ce = arg;
        struct drm_i915_private *i915 = ce->gem_context->i915;
        struct drm_i915_gem_object *obj;
        struct rnd_state prng;
        IGT_TIMEOUT(end);
        u32 *vaddr;
        int err = 0;

        prandom_seed_state(&prng, i915_selftest.random_seed);

        do {
                u32 sz = prandom_u32_state(&prng) % SZ_32M;
                u32 val = prandom_u32_state(&prng);
                u32 i;

                sz = round_up(sz, PAGE_SIZE);

                pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);

                obj = i915_gem_object_create_internal(i915, sz);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto err_flush;
                }

                vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
                if (IS_ERR(vaddr)) {
                        err = PTR_ERR(vaddr);
                        goto err_put;
                }

                /*
                 * XXX: The goal is to move this to get_pages, so try to dirty
                 * the CPU cache first to check that we do the required clflush
                 * before scheduling the blt for !llc platforms. This matches
                 * some version of reality where at get_pages the pages
                 * themselves may not yet be coherent with the GPU (swap-in).
                 * If we are missing the flush then we should see the stale
                 * cache values after we do the set_to_cpu_domain and pick them
                 * up as a test failure.
                 */
                memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));

                if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                        obj->cache_dirty = true;

                err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
                                                       &obj->mm.page_sizes,
                                                       val);
                if (err)
                        goto err_unpin;

                /*
                 * XXX: For now do the wait without the object resv lock to
                 * ensure we don't deadlock.
                 */
                err = i915_gem_object_wait(obj,
                                           I915_WAIT_INTERRUPTIBLE |
                                           I915_WAIT_ALL,
                                           MAX_SCHEDULE_TIMEOUT);
                if (err)
                        goto err_unpin;

                i915_gem_object_lock(obj);
                err = i915_gem_object_set_to_cpu_domain(obj, false);
                i915_gem_object_unlock(obj);
                if (err)
                        goto err_unpin;

                for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
                        if (vaddr[i] != val) {
                                pr_err("vaddr[%u]=%x, expected=%x\n", i,
                                       vaddr[i], val);
                                err = -EINVAL;
                                goto err_unpin;
                        }
                }

                i915_gem_object_unpin_map(obj);
                i915_gem_object_put(obj);
        } while (!time_after(jiffies, end));

        goto err_flush;

err_unpin:
        i915_gem_object_unpin_map(obj);
err_put:
        i915_gem_object_put(obj);
err_flush:
        mutex_lock(&i915->drm.struct_mutex);
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        mutex_unlock(&i915->drm.struct_mutex);

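        /* An allocation failure under memory pressure is not a test failure. */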
        if (err == -ENOMEM)
                err = 0;

        return err;
}

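/*
 * Live selftest entry point. The client fill blt needs the copy engine,
 * so skip when BCS0 is absent or the GPU is terminally wedged.
 */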
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_client_fill),
        };

        if (i915_terminally_wedged(i915))
                return 0;

        if (!HAS_ENGINE(i915, BCS0))
                return 0;

        return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
}