// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "mock_context.h"

static int igt_client_fill(void *arg)
{
	struct intel_context *ce = arg;
	struct drm_i915_private *i915 = ce->gem_context->i915;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

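	/*
	 * Fill an object of random size with a random value on the
	 * blitter, then verify the result from the CPU, until the
	 * selftest timeout expires.
	 */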
	do {
		u32 sz = prandom_u32_state(&prng) % SZ_32M;
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);

		pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);

		obj = i915_gem_object_create_internal(i915, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

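		/* Map the pages WB on the CPU for dirtying now and verifying later. */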
		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

		/*
		 * XXX: The goal is to move this to get_pages, so try to dirty the
		 * CPU cache first to check that we do the required clflush
		 * before scheduling the blt for !llc platforms. This matches
		 * some version of reality where at get_pages the pages
		 * themselves may not yet be coherent with the GPU (swap-in). If
		 * we are missing the flush then we should see the stale cached
		 * values after we do the set_to_cpu_domain and pick it up as a
		 * test failure.
		 */
		memset32(vaddr, val ^ 0xdeadbeaf,
			 obj->base.size / sizeof(u32));

		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

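		/* Ask the blitter to fill every page of the object with val. */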
		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
						       &obj->mm.page_sizes,
						       val);
		if (err)
			goto err_unpin;

		/*
		 * XXX: For now do the wait without the object resv lock to
		 * ensure we don't deadlock.
		 */
		err = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT);
		if (err)
			goto err_unpin;

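		/* Flush GPU writes and pull the object into the CPU read domain. */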
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

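		/* Every u32 of the object should now contain the fill value. */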
		for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

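/* Error unwind: release whatever the failing iteration still holds. */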
err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
	mutex_lock(&i915->drm.struct_mutex);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	/* A random allocation failure is not a test failure. */
	if (err == -ENOMEM)
		err = 0;

	return err;
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
	};

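	/* Submission requires a functional GPU; skip if terminally wedged. */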
	if (i915_terminally_wedged(i915))
		return 0;

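	/* The client blt path executes on the blitter, so require BCS0. */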
	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
}