/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
	intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...)	__ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...)	__ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...)	__ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
	__ENGINE_REG_OP(read64_2x32, (engine__), \
			lower_reg__((engine__)->mmio_base), \
			upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
	__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...)	__ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...)	__ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...)	__ENGINE_WRITE_OP(write_fw, __VA_ARGS__)

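/*
 * Illustrative sketch, not part of the original header: pairing the
 * accessors above with a base-relative register define, as described in
 * the comment before __ENGINE_REG_OP(). RING_TAIL(base) is a real define
 * from i915_reg.h; this helper itself is hypothetical.
 */
static inline u32 example_read_ring_tail(struct intel_engine_cs *engine)
{
	/* Expands to:
	 * intel_uncore_read(engine->uncore, RING_TAIL(engine->mmio_base))
	 */
	return ENGINE_READ(engine, RING_TAIL);
}
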
#define GEN6_RING_FAULT_REG_READ(engine__) \
	intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
	intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
({ \
	u32 __val; \
\
	__val = intel_uncore_read((engine__)->uncore, \
				  RING_FAULT_REG(engine__)); \
	__val &= ~(clear__); \
	__val |= (set__); \
	intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
			   __val); \
})

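/*
 * Illustrative sketch, not part of the original header: clearing the
 * sticky fault bit with the RMW helper above. RING_FAULT_VALID is a real
 * bit from i915_reg.h; the wrapper is hypothetical.
 */
static inline void example_clear_ring_fault(struct intel_engine_cs *engine)
{
	/* Read, clear RING_FAULT_VALID, set no new bits, write back. */
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
}
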
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
			  unsigned int bit)
{
	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_all_active(struct intel_engine_execlists *execlists)
{
	execlists->active = 0;
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_user_begin(struct intel_engine_execlists *execlists,
			  const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

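/*
 * Illustrative sketch, not part of the original header: the set-once
 * pattern the helpers above enable, modelled on how execlists_user_begin()
 * is driven from the submission path. The function is hypothetical.
 */
static inline void example_mark_user_active(struct intel_engine_execlists *el)
{
	/* Only the 0 -> 1 transition returns true, so the begin hook
	 * runs exactly once per busy period.
	 */
	if (execlists_set_active_once(el, EXECLISTS_ACTIVE_USER))
		execlists_user_begin(el, el->port);
}
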
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));

	return port;
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.addr[reg]);
		engine->status_page.addr[reg] = value;
		clflush(&engine->status_page.addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT		0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SEQNO		0x40
#define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH		0x80
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH * sizeof(u32))

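/*
 * Illustrative sketch, not part of the original header: the slots above
 * are dword indices into the status page, so they pair directly with
 * intel_read_status_page(). Hypothetical helper.
 */
static inline u32 example_read_hws_seqno(const struct intel_engine_cs *engine)
{
	/* Sample the last seqno the GPU wrote into its HWS slot. */
	return intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
}
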
#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct kref *ref);

static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
	kref_put(&ring->ref, intel_ring_free);
}

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}

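/*
 * Illustrative sketch, not part of the original header: the
 * intel_ring_begin()/intel_ring_advance() bracket that the GEM_BUG_ON
 * above polices. Emits exactly the two dwords it reserved, so the
 * accounting matches. Hypothetical emitter.
 */
static inline int example_emit_noops(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2); /* reserve 2 dwords */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs); /* asserts all 2 dwords were written */
	return 0;
}
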
static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;

	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

static inline unsigned int
__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}

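/*
 * Worked example (added for illustration): with size = 4096, head = 64 and
 * tail = 192, the free space is (64 - 192 - 64) & 4095 = 3904 bytes. The
 * CACHELINE_BYTES of slack keeps RING_TAIL from ever being programmed into
 * the same cacheline as the last known RING_HEAD, per the rule quoted above.
 */
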
int intel_engines_init_mmio(struct drm_i915_private *i915);
int intel_engines_setup(struct drm_i915_private *i915);
int intel_engines_init(struct drm_i915_private *i915);
void intel_engines_cleanup(struct drm_i915_private *i915);

int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_ring_submission_init(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

static inline void
intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
{
	irq_work_queue(&engine->breadcrumbs.irq_work);
}

void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
				    struct drm_printer *p);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}

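/*
 * Illustrative sketch, not part of the original header: posting a seqno
 * breadcrumb with the helper above. A HWSP GGTT offset such as
 * rq->timeline->hwsp_offset is cacheline aligned and so satisfies both
 * GEM_BUG_ONs (qword aligned, bit 5 clear). Hypothetical emitter.
 */
static inline u32 *example_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_ggtt_write(cs,
				    rq->fence.seqno,
				    rq->timeline->hwsp_offset,
				    0);
}
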
static inline void intel_engine_reset(struct intel_engine_cs *engine,
				      bool stalled)
{
	if (engine->reset.reset)
		engine->reset.reset(engine, stalled);
	engine->serial++; /* contexts lost */
}

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

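/*
 * Illustrative sketch, not part of the original header: a lock-free reader
 * for the totals updated under the seqlock above. The in-tree consumer is
 * intel_engine_get_busy_time(); this hypothetical helper reports retired
 * time only and ignores a still-active context.
 */
static inline ktime_t example_sample_busy(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = engine->stats.total;
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}
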
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	if (!execlists->preempt_hang.inject_hang)
		return false;

	complete(&execlists->preempt_hang.completion);
	return true;
}

#else

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	return false;
}

#endif

void intel_engine_init_active(struct intel_engine_cs *engine,
			      unsigned int subclass);
#define ENGINE_PHYSICAL	0
#define ENGINE_MOCK	1
#define ENGINE_VIRTUAL	2

#endif /* _INTEL_RINGBUFFER_H_ */