Linux-libre 4.9.22-gnu: drivers/gpu/drm/i915/intel_lrc.c (librecmc/linux-libre.git)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Ben Widawsky <ben@bwidawsk.net>
25  *    Michel Thierry <michel.thierry@intel.com>
26  *    Thomas Daniel <thomas.daniel@intel.com>
27  *    Oscar Mateo <oscar.mateo@intel.com>
28  *
29  */
30
31 /**
32  * DOC: Logical Rings, Logical Ring Contexts and Execlists
33  *
34  * Motivation:
35  * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36  * These expanded contexts enable a number of new abilities, especially
37  * "Execlists" (also implemented in this file).
38  *
39  * One of the main differences with the legacy HW contexts is that logical
40  * ring contexts incorporate many more things into the context's state, like
41  * PDPs or ringbuffer control registers:
42  *
43  * The reason why PDPs are included in the context is straightforward: as
44  * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45  * contained there means you don't need to do a ppgtt->switch_mm yourself;
46  * instead, the GPU will do it for you on the context switch.
47  *
48  * But what about the ringbuffer control registers (head, tail, etc.)?
49  * Shouldn't we just need one set of those per engine command streamer? This is
50  * where the name "Logical Rings" starts to make sense: by virtualizing the
51  * rings, the engine cs shifts to a new "ring buffer" with every context
52  * switch. When you want to submit a workload to the GPU you: A) choose your
53  * context, B) find its appropriate virtualized ring, C) write commands to it
54  * and then, finally, D) tell the GPU to switch to that context.
55  *
56  * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57  * to a context is via a context execution list, ergo "Execlists".
58  *
59  * LRC implementation:
60  * Regarding the creation of contexts, we have:
61  *
62  * - One global default context.
63  * - One local default context for each opened fd.
64  * - One local extra context for each context create ioctl call.
65  *
66  * Now that ringbuffers are per-context (and not per-engine, like before)
67  * and that contexts are uniquely tied to a given engine (and not reusable,
68  * like before) we need:
69  *
70  * - One ringbuffer per-engine inside each context.
71  * - One backing object per-engine inside each context.
72  *
73  * The global default context starts its life with these new objects fully
74  * allocated and populated. The local default context for each opened fd is
75  * more complex, because we don't know at creation time which engine is going
76  * to use them. To handle this, we have implemented a deferred creation of LR
77  * contexts:
78  *
79  * The local context starts its life as a hollow or blank holder, that only
80  * gets populated for a given engine once we receive an execbuffer. If later
81  * on we receive another execbuffer ioctl for the same context but a different
82  * engine, we allocate/populate a new ringbuffer and context backing object and
83  * so on.
84  *
85  * Finally, regarding local contexts created using the ioctl call: as they are
86  * only allowed with the render ring, we can allocate & populate them right
87  * away (no need to defer anything, at least for now).
88  *
89  * Execlists implementation:
90  * Execlists are the new method by which, on gen8+ hardware, workloads are
91  * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92  * This method works as follows:
93  *
94  * When a request is committed, its commands (the BB start and any leading or
95  * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96  * for the appropriate context. The tail pointer in the hardware context is not
97  * updated at this time, but instead, kept by the driver in the ringbuffer
98  * structure. A structure representing this request is added to a request queue
99  * for the appropriate engine: this structure contains a copy of the context's
100  * tail after the request was written to the ring buffer and a pointer to the
101  * context itself.
102  *
103  * If the engine's request queue was empty before the request was added, the
104  * queue is processed immediately. Otherwise the queue will be processed during
105  * a context switch interrupt. In any case, elements on the queue will get sent
106  * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107  * globally unique 20-bit submission ID.
108  *
109  * When execution of a request completes, the GPU updates the context status
110  * buffer with a context complete event and generates a context switch interrupt.
111  * During the interrupt handling, the driver examines the events in the buffer:
112  * for each context complete event, if the announced ID matches that on the head
113  * of the request queue, then that request is retired and removed from the queue.
114  *
115  * After processing, if any requests were retired and the queue is not empty
116  * then a new execution list can be submitted. The two requests at the front of
117  * the queue are next to be submitted but since a context may not occur twice in
118  * an execution list, if subsequent requests have the same ID as the first then
119  * the two requests must be combined. This is done simply by discarding requests
120  * at the head of the queue until either only one request is left (in which case
121  * we use a NULL second context) or the first two requests have unique IDs.
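 *
 * As a hedged illustration (the request names here are invented, not taken
 * from the code below): if the queue holds requests A1 and A2 for context A
 * followed by B1 for context B, A1 is dropped in favour of A2 since A2's
 * RING_TAIL already covers A1's commands, and the pair written to the ELSP
 * is then (A2, B1).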
122  *
123  * By always executing the first two requests in the queue the driver ensures
124  * that the GPU is kept as busy as possible. In the case where a single context
125  * completes but a second context is still executing, the request for this second
126  * context will be at the head of the queue when we remove the first one. This
127  * request will then be resubmitted along with a new request for a different context,
128  * which will cause the hardware to continue executing the second request and queue
129  * the new request (the GPU detects the condition of a context getting preempted
130  * with the same context and optimizes the context switch flow by not doing
131  * preemption, but just sampling the new tail pointer).
132  *
133  */
134 #include <linux/interrupt.h>
135
136 #include <drm/drmP.h>
137 #include <drm/i915_drm.h>
138 #include "i915_drv.h"
139 #include "intel_mocs.h"
140
141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
144
145 #define RING_EXECLIST_QFULL             (1 << 0x2)
146 #define RING_EXECLIST1_VALID            (1 << 0x3)
147 #define RING_EXECLIST0_VALID            (1 << 0x4)
148 #define RING_EXECLIST_ACTIVE_STATUS     (3 << 0xE)
149 #define RING_EXECLIST1_ACTIVE           (1 << 0x11)
150 #define RING_EXECLIST0_ACTIVE           (1 << 0x12)
151
152 #define GEN8_CTX_STATUS_IDLE_ACTIVE     (1 << 0)
153 #define GEN8_CTX_STATUS_PREEMPTED       (1 << 1)
154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH  (1 << 2)
155 #define GEN8_CTX_STATUS_ACTIVE_IDLE     (1 << 3)
156 #define GEN8_CTX_STATUS_COMPLETE        (1 << 4)
157 #define GEN8_CTX_STATUS_LITE_RESTORE    (1 << 15)
158
159 #define GEN8_CTX_STATUS_COMPLETED_MASK \
160          (GEN8_CTX_STATUS_ACTIVE_IDLE | \
161           GEN8_CTX_STATUS_PREEMPTED | \
162           GEN8_CTX_STATUS_ELEMENT_SWITCH)
163
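/* Dword offsets of the register/value pairs within the logical ring context
 * image; the code below indexes reg_state[] with these (the value lives at
 * offset + 1).
 */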
164 #define CTX_LRI_HEADER_0                0x01
165 #define CTX_CONTEXT_CONTROL             0x02
166 #define CTX_RING_HEAD                   0x04
167 #define CTX_RING_TAIL                   0x06
168 #define CTX_RING_BUFFER_START           0x08
169 #define CTX_RING_BUFFER_CONTROL         0x0a
170 #define CTX_BB_HEAD_U                   0x0c
171 #define CTX_BB_HEAD_L                   0x0e
172 #define CTX_BB_STATE                    0x10
173 #define CTX_SECOND_BB_HEAD_U            0x12
174 #define CTX_SECOND_BB_HEAD_L            0x14
175 #define CTX_SECOND_BB_STATE             0x16
176 #define CTX_BB_PER_CTX_PTR              0x18
177 #define CTX_RCS_INDIRECT_CTX            0x1a
178 #define CTX_RCS_INDIRECT_CTX_OFFSET     0x1c
179 #define CTX_LRI_HEADER_1                0x21
180 #define CTX_CTX_TIMESTAMP               0x22
181 #define CTX_PDP3_UDW                    0x24
182 #define CTX_PDP3_LDW                    0x26
183 #define CTX_PDP2_UDW                    0x28
184 #define CTX_PDP2_LDW                    0x2a
185 #define CTX_PDP1_UDW                    0x2c
186 #define CTX_PDP1_LDW                    0x2e
187 #define CTX_PDP0_UDW                    0x30
188 #define CTX_PDP0_LDW                    0x32
189 #define CTX_LRI_HEADER_2                0x41
190 #define CTX_R_PWR_CLK_STATE             0x42
191 #define CTX_GPGPU_CSR_BASE_ADDRESS      0x44
192
193 #define GEN8_CTX_VALID (1<<0)
194 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
195 #define GEN8_CTX_FORCE_RESTORE (1<<2)
196 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
197 #define GEN8_CTX_PRIVILEGE (1<<8)
198
199 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
200         (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
201         (reg_state)[(pos)+1] = (val); \
202 } while (0)
203
204 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do {                \
205         const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
206         reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
207         reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
208 } while (0)
209
210 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
211         reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
212         reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
213 } while (0)
214
215 enum {
216         FAULT_AND_HANG = 0,
217         FAULT_AND_HALT, /* Debug only */
218         FAULT_AND_STREAM,
219         FAULT_AND_CONTINUE /* Unsupported */
220 };
221 #define GEN8_CTX_ID_SHIFT 32
222 #define GEN8_CTX_ID_WIDTH 21
223 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x17
224 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x26
225
226 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
227 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
228
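/* Dwords reserved at the tail of every request for the WaIdleLiteRestore
 * padding NOOPs emitted by intel_logical_ring_advance().
 */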
229 #define WA_TAIL_DWORDS 2
230
231 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
232                                             struct intel_engine_cs *engine);
233 static int intel_lr_context_pin(struct i915_gem_context *ctx,
234                                 struct intel_engine_cs *engine);
235 static void execlists_init_reg_state(u32 *reg_state,
236                                      struct i915_gem_context *ctx,
237                                      struct intel_engine_cs *engine,
238                                      struct intel_ring *ring);
239
240 /**
241  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
242  * @dev_priv: i915 device private
243  * @enable_execlists: value of i915.enable_execlists module parameter.
244  *
245  * Only certain platforms support Execlists (the prerequisites being
246  * support for Logical Ring Contexts and Aliasing PPGTT or better).
247  *
248  * Return: 1 if Execlists is supported and has to be enabled.
249  */
250 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
251 {
252         /* On platforms with execlists available, vGPU will only
253          * support execlist mode, no ring buffer mode.
254          */
255         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
256                 return 1;
257
258         if (INTEL_GEN(dev_priv) >= 9)
259                 return 1;
260
261         if (enable_execlists == 0)
262                 return 0;
263
264         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
265             USES_PPGTT(dev_priv) &&
266             i915.use_mmio_flip >= 0)
267                 return 1;
268
269         return 0;
270 }
271
272 static void
273 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
274 {
275         struct drm_i915_private *dev_priv = engine->i915;
276
277         engine->disable_lite_restore_wa =
278                 (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
279                  IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
280                 (engine->id == VCS || engine->id == VCS2);
281
282         engine->ctx_desc_template = GEN8_CTX_VALID;
283         if (IS_GEN8(dev_priv))
284                 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
285         engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
286
287         /* TODO: WaDisableLiteRestore when we start using semaphore
288          * signalling between Command Streamers */
289         /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
290
291         /* WaEnableForceRestoreInCtxtDescForVCS:skl */
292         /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
293         if (engine->disable_lite_restore_wa)
294                 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
295 }
296
297 /**
298  * intel_lr_context_descriptor_update() - calculate & cache the
299  *                                        descriptor for a pinned context
300  * @ctx: Context to work on
301  * @engine: Engine the descriptor will be used with
302  *
303  * The context descriptor encodes various attributes of a context,
304  * including its GTT address and some flags. Because it's fairly
305  * expensive to calculate, we'll just do it once and cache the result,
306  * which remains valid until the context is unpinned.
307  *
308  * This is what a descriptor looks like, from LSB to MSB::
309  *
310  *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
311  *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
312  *      bits 32-52:    ctx ID, a globally unique tag
313  *      bits 53-54:    mbz, reserved for use by hardware
314  *      bits 55-63:    group ID, currently unused and set to 0
315  */
316 static void
317 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
318                                    struct intel_engine_cs *engine)
319 {
320         struct intel_context *ce = &ctx->engine[engine->id];
321         u64 desc;
322
323         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
324
325         desc = ctx->desc_template;                              /* bits  3-4  */
326         desc |= engine->ctx_desc_template;                      /* bits  0-11 */
327         desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
328                                                                 /* bits 12-31 */
329         desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */
330
331         ce->lrc_desc = desc;
332 }
333
334 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
335                                      struct intel_engine_cs *engine)
336 {
337         return ctx->engine[engine->id].lrc_desc;
338 }
339
340 static inline void
341 execlists_context_status_change(struct drm_i915_gem_request *rq,
342                                 unsigned long status)
343 {
344         /*
345          * Only used when GVT-g is enabled. When GVT-g is disabled,
346          * the compiler should eliminate this function as dead code.
347          */
348         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
349                 return;
350
351         atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
352 }
353
354 static void
355 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
356 {
357         ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
358         ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
359         ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
360         ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
361 }
362
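/* Write the request's new RING_TAIL into the context image and return the
 * cached context descriptor to be written into the ELSP.
 */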
363 static u64 execlists_update_context(struct drm_i915_gem_request *rq)
364 {
365         struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
366         struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
367         u32 *reg_state = ce->lrc_reg_state;
368
369         reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
370
371         /* True 32b PPGTT with dynamic page allocation: update PDP
372          * registers and point the unallocated PDPs to scratch page.
373          * PML4 is allocated during ppgtt init, so this is not needed
374          * in 48-bit mode.
375          */
376         if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
377                 execlists_update_context_pdps(ppgtt, reg_state);
378
379         return ce->lrc_desc;
380 }
381
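/* Write the two port descriptors to the ExecList Submit Port. The upper
 * dword of each descriptor is written first, and port[0] last, as writing
 * its lower dword is what triggers the submission.
 */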
382 static void execlists_submit_ports(struct intel_engine_cs *engine)
383 {
384         struct drm_i915_private *dev_priv = engine->i915;
385         struct execlist_port *port = engine->execlist_port;
386         u32 __iomem *elsp =
387                 dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
388         u64 desc[2];
389
390         if (!port[0].count)
391                 execlists_context_status_change(port[0].request,
392                                                 INTEL_CONTEXT_SCHEDULE_IN);
393         desc[0] = execlists_update_context(port[0].request);
394         engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
395
396         if (port[1].request) {
397                 GEM_BUG_ON(port[1].count);
398                 execlists_context_status_change(port[1].request,
399                                                 INTEL_CONTEXT_SCHEDULE_IN);
400                 desc[1] = execlists_update_context(port[1].request);
401                 port[1].count = 1;
402         } else {
403                 desc[1] = 0;
404         }
405         GEM_BUG_ON(desc[0] == desc[1]);
406
407         /* You must always write both descriptors in the order below. */
408         writel(upper_32_bits(desc[1]), elsp);
409         writel(lower_32_bits(desc[1]), elsp);
410
411         writel(upper_32_bits(desc[0]), elsp);
412         /* The context is automatically loaded after the following */
413         writel(lower_32_bits(desc[0]), elsp);
414 }
415
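/* GVT-g contexts are only ever submitted alone on port[0]; such a context
 * must never share an execution list with another context (see the dequeue
 * logic below).
 */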
416 static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
417 {
418         return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
419                 ctx->execlists_force_single_submission);
420 }
421
422 static bool can_merge_ctx(const struct i915_gem_context *prev,
423                           const struct i915_gem_context *next)
424 {
425         if (prev != next)
426                 return false;
427
428         if (ctx_single_port_submission(prev))
429                 return false;
430
431         return true;
432 }
433
434 static void execlists_dequeue(struct intel_engine_cs *engine)
435 {
436         struct drm_i915_gem_request *cursor, *last;
437         struct execlist_port *port = engine->execlist_port;
438         bool submit = false;
439
440         last = port->request;
441         if (last)
442                 /* WaIdleLiteRestore:bdw,skl
443                  * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
444                  * as we resubmit the request. See gen8_emit_request()
445                  * for where we prepare the padding after the end of the
446                  * request.
447                  */
448                 last->tail = last->wa_tail;
449
450         GEM_BUG_ON(port[1].request);
451
452         /* Hardware submission is through 2 ports. Conceptually each port
453          * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
454          * static for a context, and unique to each, so we only execute
455          * requests belonging to a single context from each ring. RING_HEAD
456          * is maintained by the CS in the context image; it marks the place
457          * where it got up to last time, and through RING_TAIL we tell the CS
458          * where we want to execute up to this time.
459          *
460          * In this list the requests are in order of execution. Consecutive
461          * requests from the same context are adjacent in the ringbuffer. We
462          * can combine these requests into a single RING_TAIL update:
463          *
464          *              RING_HEAD...req1...req2
465          *                                    ^- RING_TAIL
466          * since to execute req2 the CS must first execute req1.
467          *
468          * Our goal then is to point each port to the end of a consecutive
469          * sequence of requests as being the most optimal (fewest wake ups
470          * and context switches) submission.
471          */
472
473         spin_lock(&engine->execlist_lock);
474         list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
475                 /* Can we combine this request with the current port? It has to
476                  * be the same context/ringbuffer and not have any exceptions
477                  * (e.g. GVT saying never to combine contexts).
478                  *
479                  * If we can combine the requests, we can execute both by
480                  * updating the RING_TAIL to point to the end of the second
481                  * request, and so we never need to tell the hardware about
482                  * the first.
483                  */
484                 if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
485                         /* If we are on the second port and cannot combine
486                          * this request with the last, then we are done.
487                          */
488                         if (port != engine->execlist_port)
489                                 break;
490
491                         /* If GVT overrides us we only ever submit port[0],
492                          * leaving port[1] empty. Note that we also have
493                          * to be careful that we don't queue the same
494                          * context (even though a different request) to
495                          * the second port.
496                          */
497                         if (ctx_single_port_submission(cursor->ctx))
498                                 break;
499
500                         GEM_BUG_ON(last->ctx == cursor->ctx);
501
502                         i915_gem_request_assign(&port->request, last);
503                         port++;
504                 }
505                 last = cursor;
506                 submit = true;
507         }
508         if (submit) {
509                 /* Decouple all the requests submitted from the queue */
510                 engine->execlist_queue.next = &cursor->execlist_link;
511                 cursor->execlist_link.prev = &engine->execlist_queue;
512
513                 i915_gem_request_assign(&port->request, last);
514         }
515         spin_unlock(&engine->execlist_lock);
516
517         if (submit)
518                 execlists_submit_ports(engine);
519 }
520
521 static bool execlists_elsp_idle(struct intel_engine_cs *engine)
522 {
523         return !engine->execlist_port[0].request;
524 }
525
526 static bool execlists_elsp_ready(struct intel_engine_cs *engine)
527 {
528         int port;
529
530         port = 1; /* wait for a free slot */
531         if (engine->disable_lite_restore_wa || engine->preempt_wa)
532                 port = 0; /* wait for GPU to be idle before continuing */
533
534         return !engine->execlist_port[port].request;
535 }
536
537 /*
538  * Check the unread Context Status Buffers and manage the submission of new
539  * contexts to the ELSP accordingly.
540  */
541 static void intel_lrc_irq_handler(unsigned long data)
542 {
543         struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
544         struct execlist_port *port = engine->execlist_port;
545         struct drm_i915_private *dev_priv = engine->i915;
546
547         intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
548
549         if (!execlists_elsp_idle(engine)) {
550                 u32 __iomem *csb_mmio =
551                         dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
552                 u32 __iomem *buf =
553                         dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
554                 unsigned int csb, head, tail;
555
556                 csb = readl(csb_mmio);
557                 head = GEN8_CSB_READ_PTR(csb);
558                 tail = GEN8_CSB_WRITE_PTR(csb);
559                 if (tail < head)
560                         tail += GEN8_CSB_ENTRIES;
561                 while (head < tail) {
562                         unsigned int idx = ++head % GEN8_CSB_ENTRIES;
563                         unsigned int status = readl(buf + 2 * idx);
564
565                         if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
566                                 continue;
567
568                         GEM_BUG_ON(port[0].count == 0);
569                         if (--port[0].count == 0) {
570                                 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
571                                 execlists_context_status_change(port[0].request,
572                                                                 INTEL_CONTEXT_SCHEDULE_OUT);
573
574                                 i915_gem_request_put(port[0].request);
575                                 port[0] = port[1];
576                                 memset(&port[1], 0, sizeof(port[1]));
577
578                                 engine->preempt_wa = false;
579                         }
580
581                         GEM_BUG_ON(port[0].count == 0 &&
582                                    !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
583                 }
584
585                 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
586                                      GEN8_CSB_WRITE_PTR(csb) << 8),
587                        csb_mmio);
588         }
589
590         if (execlists_elsp_ready(engine))
591                 execlists_dequeue(engine);
592
593         intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
594 }
595
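/* Request submission backend for execlists: queue the request and, if the
 * ELSP is currently idle, kick the irq tasklet to dequeue it immediately.
 */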
596 static void execlists_submit_request(struct drm_i915_gem_request *request)
597 {
598         struct intel_engine_cs *engine = request->engine;
599         unsigned long flags;
600
601         spin_lock_irqsave(&engine->execlist_lock, flags);
602
603         list_add_tail(&request->execlist_link, &engine->execlist_queue);
604         if (execlists_elsp_idle(engine))
605                 tasklet_hi_schedule(&engine->irq_tasklet);
606
607         spin_unlock_irqrestore(&engine->execlist_lock, flags);
608 }
609
610 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
611 {
612         struct intel_engine_cs *engine = request->engine;
613         struct intel_context *ce = &request->ctx->engine[engine->id];
614         int ret;
615
616         /* Flush enough space to reduce the likelihood of waiting after
617          * we start building the request - in which case we will just
618          * have to repeat work.
619          */
620         request->reserved_space += EXECLISTS_REQUEST_SIZE;
621
622         if (!ce->state) {
623                 ret = execlists_context_deferred_alloc(request->ctx, engine);
624                 if (ret)
625                         return ret;
626         }
627
628         request->ring = ce->ring;
629
630         ret = intel_lr_context_pin(request->ctx, engine);
631         if (ret)
632                 return ret;
633
634         if (i915.enable_guc_submission) {
635                 /*
636                  * Check that the GuC has space for the request before
637                  * going any further, as the i915_add_request() call
638                  * later on mustn't fail ...
639                  */
640                 ret = i915_guc_wq_reserve(request);
641                 if (ret)
642                         goto err_unpin;
643         }
644
645         ret = intel_ring_begin(request, 0);
646         if (ret)
647                 goto err_unreserve;
648
649         if (!ce->initialised) {
650                 ret = engine->init_context(request);
651                 if (ret)
652                         goto err_unreserve;
653
654                 ce->initialised = true;
655         }
656
657         /* Note that after this point, we have committed to using
658          * this request as it is being used to both track the
659          * state of engine initialisation and liveness of the
660          * golden renderstate above. Think twice before you try
661          * to cancel/unwind this request now.
662          */
663
664         request->reserved_space -= EXECLISTS_REQUEST_SIZE;
665         return 0;
666
667 err_unreserve:
668         if (i915.enable_guc_submission)
669                 i915_guc_wq_unreserve(request);
670 err_unpin:
671         intel_lr_context_unpin(request->ctx, engine);
672         return ret;
673 }
674
675 /*
676  * intel_logical_ring_advance() - advance the tail and prepare for submission
677  * @request: Request to advance the logical ringbuffer of.
678  *
679  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
680  * really happens during submission is that the context and current tail will be placed
681  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
682  * point, the tail *inside* the context is updated and the ELSP written to.
683  */
684 static int
685 intel_logical_ring_advance(struct drm_i915_gem_request *request)
686 {
687         struct intel_ring *ring = request->ring;
688         struct intel_engine_cs *engine = request->engine;
689
690         intel_ring_advance(ring);
691         request->tail = ring->tail;
692
693         /*
694          * Here we add two extra NOOPs as padding to avoid
695          * lite restore of a context with HEAD==TAIL.
696          *
697          * Caller must reserve WA_TAIL_DWORDS for us!
698          */
699         intel_ring_emit(ring, MI_NOOP);
700         intel_ring_emit(ring, MI_NOOP);
701         intel_ring_advance(ring);
702         request->wa_tail = ring->tail;
703
704         /* We keep the previous context alive until we retire the following
705          * request. This ensures that the context object is still pinned
706          * for any residual writes the HW makes into it on the context switch
707          * into the next object following the breadcrumb. Otherwise, we may
708          * retire the context too early.
709          */
710         request->previous_context = engine->last_context;
711         engine->last_context = request->ctx;
712         return 0;
713 }
714
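/* Pin the per-engine context state and its ring into the GGTT, map the
 * register state page and refresh the cached descriptor. The pin is
 * refcounted per context/engine, so only the first pin does the work.
 */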
715 static int intel_lr_context_pin(struct i915_gem_context *ctx,
716                                 struct intel_engine_cs *engine)
717 {
718         struct intel_context *ce = &ctx->engine[engine->id];
719         void *vaddr;
720         int ret;
721
722         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
723
724         if (ce->pin_count++)
725                 return 0;
726
727         ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
728                            PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
729         if (ret)
730                 goto err;
731
732         vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
733         if (IS_ERR(vaddr)) {
734                 ret = PTR_ERR(vaddr);
735                 goto unpin_vma;
736         }
737
738         ret = intel_ring_pin(ce->ring);
739         if (ret)
740                 goto unpin_map;
741
742         intel_lr_context_descriptor_update(ctx, engine);
743
744         ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
745         ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
746                 i915_ggtt_offset(ce->ring->vma);
747
748         ce->state->obj->dirty = true;
749
750         /* Invalidate GuC TLB. */
751         if (i915.enable_guc_submission) {
752                 struct drm_i915_private *dev_priv = ctx->i915;
753                 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
754         }
755
756         i915_gem_context_get(ctx);
757         return 0;
758
759 unpin_map:
760         i915_gem_object_unpin_map(ce->state->obj);
761 unpin_vma:
762         __i915_vma_unpin(ce->state);
763 err:
764         ce->pin_count = 0;
765         return ret;
766 }
767
768 void intel_lr_context_unpin(struct i915_gem_context *ctx,
769                             struct intel_engine_cs *engine)
770 {
771         struct intel_context *ce = &ctx->engine[engine->id];
772
773         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
774         GEM_BUG_ON(ce->pin_count == 0);
775
776         if (--ce->pin_count)
777                 return;
778
779         intel_ring_unpin(ce->ring);
780
781         i915_gem_object_unpin_map(ce->state->obj);
782         i915_vma_unpin(ce->state);
783
784         i915_gem_context_put(ctx);
785 }
786
787 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
788 {
789         int ret, i;
790         struct intel_ring *ring = req->ring;
791         struct i915_workarounds *w = &req->i915->workarounds;
792
793         if (w->count == 0)
794                 return 0;
795
796         ret = req->engine->emit_flush(req, EMIT_BARRIER);
797         if (ret)
798                 return ret;
799
800         ret = intel_ring_begin(req, w->count * 2 + 2);
801         if (ret)
802                 return ret;
803
804         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
805         for (i = 0; i < w->count; i++) {
806                 intel_ring_emit_reg(ring, w->reg[i].addr);
807                 intel_ring_emit(ring, w->reg[i].value);
808         }
809         intel_ring_emit(ring, MI_NOOP);
810
811         intel_ring_advance(ring);
812
813         ret = req->engine->emit_flush(req, EMIT_BARRIER);
814         if (ret)
815                 return ret;
816
817         return 0;
818 }
819
820 #define wa_ctx_emit(batch, index, cmd)                                  \
821         do {                                                            \
822                 int __index = (index)++;                                \
823                 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
824                         return -ENOSPC;                                 \
825                 }                                                       \
826                 batch[__index] = (cmd);                                 \
827         } while (0)
828
829 #define wa_ctx_emit_reg(batch, index, reg) \
830         wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
831
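/*
 * Illustrative use of the helpers above (this exact sequence appears in
 * gen8_emit_flush_coherentl3_wa() below): emit an MI_LOAD_REGISTER_IMM
 * that programs a single register from the WA batch:
 *
 *	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 *	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 *	wa_ctx_emit(batch, index, l3sqc4_flush);
 */
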
832 /*
833  * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
834  * PIPE_CONTROL instruction. This is required for the flush to happen correctly
835  * but there is a slight complication as this is applied in a WA batch where the
836  * values are only initialized once, so we cannot take the register value at the
837  * beginning and reuse it further; hence we save its value to memory, upload a
838  * constant value with bit21 set and then we restore it back with the saved value.
839  * To simplify the WA, a constant value is formed by using the default value
840  * of this register. This shouldn't be a problem because we are only modifying
841  * it for a short period and this batch is non-preemptible. We can of course
842  * use additional instructions that read the actual value of the register
843  * at that time and set our bit of interest but it makes the WA complicated.
844  *
845  * This WA is also required for Gen9 so extracting as a function avoids
846  * code duplication.
847  */
848 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
849                                                 uint32_t *batch,
850                                                 uint32_t index)
851 {
852         struct drm_i915_private *dev_priv = engine->i915;
853         uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
854
855         /*
856          * WaDisableLSQCROPERFforOCL:skl,kbl
857          * This WA is implemented in skl_init_clock_gating() but since
858          * this batch updates GEN8_L3SQCREG4 with default value we need to
859          * set this bit here to retain the WA during flush.
860          */
861         if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
862                 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
863
864         wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
865                                    MI_SRM_LRM_GLOBAL_GTT));
866         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
867         wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
868         wa_ctx_emit(batch, index, 0);
869
870         wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
871         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
872         wa_ctx_emit(batch, index, l3sqc4_flush);
873
874         wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
875         wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
876                                    PIPE_CONTROL_DC_FLUSH_ENABLE));
877         wa_ctx_emit(batch, index, 0);
878         wa_ctx_emit(batch, index, 0);
879         wa_ctx_emit(batch, index, 0);
880         wa_ctx_emit(batch, index, 0);
881
882         wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
883                                    MI_SRM_LRM_GLOBAL_GTT));
884         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
885         wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
886         wa_ctx_emit(batch, index, 0);
887
888         return index;
889 }
890
891 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
892                                     uint32_t offset,
893                                     uint32_t start_alignment)
894 {
895         return wa_ctx->offset = ALIGN(offset, start_alignment);
896 }
897
898 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
899                              uint32_t offset,
900                              uint32_t size_alignment)
901 {
902         wa_ctx->size = offset - wa_ctx->offset;
903
904         WARN(wa_ctx->size % size_alignment,
905              "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
906              wa_ctx->size, size_alignment);
907         return 0;
908 }
909
910 /*
911  * Typically we only have one indirect_ctx and per_ctx batch buffer which are
912  * initialized at the beginning and shared across all contexts but this field
913  * helps us to have multiple batches at different offsets and select them based
914  * on some criteria. At the moment this batch always starts at the beginning of the page
915  * and at this point we don't have multiple wa_ctx batch buffers.
916  *
917  * The number of WAs applied is not known at the beginning; we use this field
918  * to return the number of DWORDS written.
919  *
920  * Note that this batch does not contain MI_BATCH_BUFFER_END,
921  * so NOOPs are added as padding to make it cacheline aligned.
922  * MI_BATCH_BUFFER_END will be added to the perctx batch and the two together
923  * make a complete batch buffer.
924  */
925 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
926                                     struct i915_wa_ctx_bb *wa_ctx,
927                                     uint32_t *batch,
928                                     uint32_t *offset)
929 {
930         uint32_t scratch_addr;
931         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
932
933         /* WaDisableCtxRestoreArbitration:bdw,chv */
934         wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
935
936         /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
937         if (IS_BROADWELL(engine->i915)) {
938                 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
939                 if (rc < 0)
940                         return rc;
941                 index = rc;
942         }
943
944         /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
945         /* Actual scratch location is at 128 bytes offset */
946         scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
947
948         wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
949         wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
950                                    PIPE_CONTROL_GLOBAL_GTT_IVB |
951                                    PIPE_CONTROL_CS_STALL |
952                                    PIPE_CONTROL_QW_WRITE));
953         wa_ctx_emit(batch, index, scratch_addr);
954         wa_ctx_emit(batch, index, 0);
955         wa_ctx_emit(batch, index, 0);
956         wa_ctx_emit(batch, index, 0);
957
958         /* Pad to end of cacheline */
959         while (index % CACHELINE_DWORDS)
960                 wa_ctx_emit(batch, index, MI_NOOP);
961
962         /*
963          * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
964          * execution depends on the length specified in terms of cache lines
965          * in the register CTX_RCS_INDIRECT_CTX
966          */
967
968         return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
969 }
970
971 /*
972  *  This batch is started immediately after indirect_ctx batch. Since we ensure
973  *  that indirect_ctx ends on a cacheline this batch is aligned automatically.
974  *
975  *  The number of DWORDS written is returned using this field.
976  *
977  *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
978  *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
979  */
980 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
981                                struct i915_wa_ctx_bb *wa_ctx,
982                                uint32_t *batch,
983                                uint32_t *offset)
984 {
985         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
986
987         /* WaDisableCtxRestoreArbitration:bdw,chv */
988         wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
989
990         wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
991
992         return wa_ctx_end(wa_ctx, *offset = index, 1);
993 }
994
995 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
996                                     struct i915_wa_ctx_bb *wa_ctx,
997                                     uint32_t *batch,
998                                     uint32_t *offset)
999 {
1000         int ret;
1001         struct drm_i915_private *dev_priv = engine->i915;
1002         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1003
1004         /* WaDisableCtxRestoreArbitration:skl,bxt */
1005         if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
1006             IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1007                 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1008
1009         /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1010         ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1011         if (ret < 0)
1012                 return ret;
1013         index = ret;
1014
1015         /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
1016         wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1017         wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
1018         wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
1019                             GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
1020         wa_ctx_emit(batch, index, MI_NOOP);
1021
1022         /* WaClearSlmSpaceAtContextSwitch:kbl */
1023         /* Actual scratch location is at 128 bytes offset */
1024         if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
1025                 u32 scratch_addr =
1026                         i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1027
1028                 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1029                 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1030                                            PIPE_CONTROL_GLOBAL_GTT_IVB |
1031                                            PIPE_CONTROL_CS_STALL |
1032                                            PIPE_CONTROL_QW_WRITE));
1033                 wa_ctx_emit(batch, index, scratch_addr);
1034                 wa_ctx_emit(batch, index, 0);
1035                 wa_ctx_emit(batch, index, 0);
1036                 wa_ctx_emit(batch, index, 0);
1037         }
1038
1039         /* WaMediaPoolStateCmdInWABB:bxt */
1040         if (HAS_POOLED_EU(engine->i915)) {
1041                 /*
1042                  * EU pool configuration is setup along with golden context
1043                  * during context initialization. This value depends on
1044                  * device type (2x6 or 3x6) and needs to be updated based
1045                  * on which subslice is disabled especially for 2x6
1046                  * devices, however it is safe to load default
1047                  * configuration of 3x6 device instead of masking off
1048                  * corresponding bits because HW ignores bits of a disabled
1049                  * subslice and drops down to appropriate config. Please
1050                  * see render_state_setup() in i915_gem_render_state.c for
1051                  * possible configurations, to avoid duplication they are
1052                  * not shown here again.
1053                  */
1054                 u32 eu_pool_config = 0x00777000;
1055                 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1056                 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1057                 wa_ctx_emit(batch, index, eu_pool_config);
1058                 wa_ctx_emit(batch, index, 0);
1059                 wa_ctx_emit(batch, index, 0);
1060                 wa_ctx_emit(batch, index, 0);
1061         }
1062
1063         /* Pad to end of cacheline */
1064         while (index % CACHELINE_DWORDS)
1065                 wa_ctx_emit(batch, index, MI_NOOP);
1066
1067         return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1068 }
1069
1070 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1071                                struct i915_wa_ctx_bb *wa_ctx,
1072                                uint32_t *batch,
1073                                uint32_t *offset)
1074 {
1075         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1076
1077         /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1078         if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1079             IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1080                 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1081                 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1082                 wa_ctx_emit(batch, index,
1083                             _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1084                 wa_ctx_emit(batch, index, MI_NOOP);
1085         }
1086
1087         /* WaClearTdlStateAckDirtyBits:bxt */
1088         if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1089                 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1090
1091                 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1092                 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1093
1094                 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1095                 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1096
1097                 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1098                 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1099
1100                 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1101                 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1102                 wa_ctx_emit(batch, index, 0x0);
1103                 wa_ctx_emit(batch, index, MI_NOOP);
1104         }
1105
1106         /* WaDisableCtxRestoreArbitration:skl,bxt */
1107         if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1108             IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1109                 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1110
1111         wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1112
1113         return wa_ctx_end(wa_ctx, *offset = index, 1);
1114 }
1115
1116 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1117 {
1118         struct drm_i915_gem_object *obj;
1119         struct i915_vma *vma;
1120         int err;
1121
1122         obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
1123         if (IS_ERR(obj))
1124                 return PTR_ERR(obj);
1125
1126         vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1127         if (IS_ERR(vma)) {
1128                 err = PTR_ERR(vma);
1129                 goto err;
1130         }
1131
1132         err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
1133         if (err)
1134                 goto err;
1135
1136         engine->wa_ctx.vma = vma;
1137         return 0;
1138
1139 err:
1140         i915_gem_object_put(obj);
1141         return err;
1142 }
1143
1144 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1145 {
1146         i915_vma_unpin_and_release(&engine->wa_ctx.vma);
1147 }
1148
1149 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1150 {
1151         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1152         uint32_t *batch;
1153         uint32_t offset;
1154         struct page *page;
1155         int ret;
1156
1157         WARN_ON(engine->id != RCS);
1158
1159         /* update this when WA for higher Gen are added */
1160         if (INTEL_GEN(engine->i915) > 9) {
1161                 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1162                           INTEL_GEN(engine->i915));
1163                 return 0;
1164         }
1165
1166         /* some WA perform writes to scratch page, ensure it is valid */
1167         if (!engine->scratch) {
1168                 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1169                 return -EINVAL;
1170         }
1171
1172         ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1173         if (ret) {
1174                 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1175                 return ret;
1176         }
1177
1178         page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
1179         batch = kmap_atomic(page);
1180         offset = 0;
1181
1182         if (IS_GEN8(engine->i915)) {
1183                 ret = gen8_init_indirectctx_bb(engine,
1184                                                &wa_ctx->indirect_ctx,
1185                                                batch,
1186                                                &offset);
1187                 if (ret)
1188                         goto out;
1189
1190                 ret = gen8_init_perctx_bb(engine,
1191                                           &wa_ctx->per_ctx,
1192                                           batch,
1193                                           &offset);
1194                 if (ret)
1195                         goto out;
1196         } else if (IS_GEN9(engine->i915)) {
1197                 ret = gen9_init_indirectctx_bb(engine,
1198                                                &wa_ctx->indirect_ctx,
1199                                                batch,
1200                                                &offset);
1201                 if (ret)
1202                         goto out;
1203
1204                 ret = gen9_init_perctx_bb(engine,
1205                                           &wa_ctx->per_ctx,
1206                                           batch,
1207                                           &offset);
1208                 if (ret)
1209                         goto out;
1210         }
1211
1212 out:
1213         kunmap_atomic(batch);
1214         if (ret)
1215                 lrc_destroy_wa_ctx_obj(engine);
1216
1217         return ret;
1218 }
1219
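/* Point the engine's hardware status page register at the status page
 * allocated for it in the GGTT.
 */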
1220 static void lrc_init_hws(struct intel_engine_cs *engine)
1221 {
1222         struct drm_i915_private *dev_priv = engine->i915;
1223
1224         I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1225                    engine->status_page.ggtt_offset);
1226         POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1227 }
1228
1229 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1230 {
1231         struct drm_i915_private *dev_priv = engine->i915;
1232         int ret;
1233
1234         ret = intel_mocs_init_engine(engine);
1235         if (ret)
1236                 return ret;
1237
1238         lrc_init_hws(engine);
1239
1240         intel_engine_reset_breadcrumbs(engine);
1241
1242         I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1243
1244         I915_WRITE(RING_MODE_GEN7(engine),
1245                    _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1246                    _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1247
1248         DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1249
1250         intel_engine_init_hangcheck(engine);
1251
1252         if (!execlists_elsp_idle(engine))
1253                 execlists_submit_ports(engine);
1254
1255         return 0;
1256 }
1257
1258 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1259 {
1260         struct drm_i915_private *dev_priv = engine->i915;
1261         int ret;
1262
1263         ret = gen8_init_common_ring(engine);
1264         if (ret)
1265                 return ret;
1266
1267         /* We need to disable the AsyncFlip performance optimisations in order
1268          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1269          * programmed to '1' on all products.
1270          *
1271          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1272          */
1273         I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1274
1275         I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1276
1277         return init_workarounds_ring(engine);
1278 }
1279
1280 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1281 {
1282         int ret;
1283
1284         ret = gen8_init_common_ring(engine);
1285         if (ret)
1286                 return ret;
1287
1288         return init_workarounds_ring(engine);
1289 }
1290
1291 static void reset_common_ring(struct intel_engine_cs *engine,
1292                               struct drm_i915_gem_request *request)
1293 {
1294         struct drm_i915_private *dev_priv = engine->i915;
1295         struct execlist_port *port = engine->execlist_port;
1296         struct intel_context *ce = &request->ctx->engine[engine->id];
1297
1298         /* We want a simple context + ring to execute the breadcrumb update.
1299          * We cannot rely on the context being intact across the GPU hang,
1300          * so clear it and rebuild just what we need for the breadcrumb.
1301          * All pending requests for this context will be zapped, and any
1302          * future request will be after userspace has had the opportunity
1303          * to recreate its own state.
1304          */
1305         execlists_init_reg_state(ce->lrc_reg_state,
1306                                  request->ctx, engine, ce->ring);
1307
1308         /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
1309         ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1310                 i915_ggtt_offset(ce->ring->vma);
1311         ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
1312
1313         request->ring->head = request->postfix;
1314         request->ring->last_retired_head = -1;
1315         intel_ring_update_space(request->ring);
1316
1317         if (i915.enable_guc_submission)
1318                 return;
1319
1320         /* Catch up with any missed context-switch interrupts */
1321         I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
1322         if (request->ctx != port[0].request->ctx) {
1323                 i915_gem_request_put(port[0].request);
1324                 port[0] = port[1];
1325                 memset(&port[1], 0, sizeof(port[1]));
1326         }
1327
1328         /* CS is stopped, and we will resubmit both ports on resume */
1329         GEM_BUG_ON(request->ctx != port[0].request->ctx);
1330         port[0].count = 0;
1331         port[1].count = 0;
1332
1333         /* Reset WaIdleLiteRestore:bdw,skl as well */
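        /* (request->wa_tail points just past the WA_TAIL_DWORDS of MI_NOOP
         * padding reserved at the end of every request for WaIdleLiteRestore;
         * stepping the tail back over that padding allows the workaround to
         * be applied again when the request is resubmitted.)
         */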
1334         request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
1335 }
1336
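/*
 * Reload the four legacy PDP register pairs (upper/lower dword each) with
 * a single MI_LOAD_REGISTER_IMM. This is used when the 32b ppgtt page
 * directories have changed and we cannot rely on the hw to restore them
 * (see gen8_emit_bb_start() below).
 */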
1337 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1338 {
1339         struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1340         struct intel_ring *ring = req->ring;
1341         struct intel_engine_cs *engine = req->engine;
1342         const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1343         int i, ret;
1344
1345         ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1346         if (ret)
1347                 return ret;
1348
1349         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1350         for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1351                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1352
1353                 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
1354                 intel_ring_emit(ring, upper_32_bits(pd_daddr));
1355                 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
1356                 intel_ring_emit(ring, lower_32_bits(pd_daddr));
1357         }
1358
1359         intel_ring_emit(ring, MI_NOOP);
1360         intel_ring_advance(ring);
1361
1362         return 0;
1363 }
1364
1365 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1366                               u64 offset, u32 len,
1367                               unsigned int dispatch_flags)
1368 {
1369         struct intel_ring *ring = req->ring;
1370         bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1371         int ret;
1372
1373         /* Don't rely on the hw updating the PDPs, especially in
1374          * lite-restore. Ideally, we should set Force PD Restore in the
1375          * ctx descriptor, but we can't. Force Restore would be a second
1376          * option, but it is unsafe in case of lite-restore (because the
1377          * ctx is not idle). PML4 is allocated during ppgtt init so this
1378          * is not needed in 48-bit mode. */
1379         if (req->ctx->ppgtt &&
1380             (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1381                 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1382                     !intel_vgpu_active(req->i915)) {
1383                         ret = intel_logical_ring_emit_pdps(req);
1384                         if (ret)
1385                                 return ret;
1386                 }
1387
1388                 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1389         }
1390
1391         ret = intel_ring_begin(req, 4);
1392         if (ret)
1393                 return ret;
1394
1395         /* FIXME(BDW): Address space and security selectors. */
1396         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
1397                         (ppgtt<<8) |
1398                         (dispatch_flags & I915_DISPATCH_RS ?
1399                          MI_BATCH_RESOURCE_STREAMER : 0));
1400         intel_ring_emit(ring, lower_32_bits(offset));
1401         intel_ring_emit(ring, upper_32_bits(offset));
1402         intel_ring_emit(ring, MI_NOOP);
1403         intel_ring_advance(ring);
1404
1405         return 0;
1406 }
1407
1408 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1409 {
1410         struct drm_i915_private *dev_priv = engine->i915;
1411         I915_WRITE_IMR(engine,
1412                        ~(engine->irq_enable_mask | engine->irq_keep_mask));
1413         POSTING_READ_FW(RING_IMR(engine->mmio_base));
1414 }
1415
1416 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1417 {
1418         struct drm_i915_private *dev_priv = engine->i915;
1419         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1420 }
1421
1422 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
1423 {
1424         struct intel_ring *ring = request->ring;
1425         u32 cmd;
1426         int ret;
1427
1428         ret = intel_ring_begin(request, 4);
1429         if (ret)
1430                 return ret;
1431
1432         cmd = MI_FLUSH_DW + 1;
1433
1434         /* We always require a command barrier so that subsequent
1435          * commands, such as breadcrumb interrupts, are strictly ordered
1436          * wrt the contents of the write cache being flushed to memory
1437          * (and thus being coherent from the CPU).
1438          */
1439         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1440
1441         if (mode & EMIT_INVALIDATE) {
1442                 cmd |= MI_INVALIDATE_TLB;
1443                 if (request->engine->id == VCS)
1444                         cmd |= MI_INVALIDATE_BSD;
1445         }
1446
1447         intel_ring_emit(ring, cmd);
1448         intel_ring_emit(ring,
1449                         I915_GEM_HWS_SCRATCH_ADDR |
1450                         MI_FLUSH_DW_USE_GTT);
1451         intel_ring_emit(ring, 0); /* upper addr */
1452         intel_ring_emit(ring, 0); /* value */
1453         intel_ring_advance(ring);
1454
1455         return 0;
1456 }
1457
1458 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1459                                   u32 mode)
1460 {
1461         struct intel_ring *ring = request->ring;
1462         struct intel_engine_cs *engine = request->engine;
1463         u32 scratch_addr =
1464                 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1465         bool vf_flush_wa = false, dc_flush_wa = false;
1466         u32 flags = 0;
1467         int ret;
1468         int len;
1469
1470         flags |= PIPE_CONTROL_CS_STALL;
1471
1472         if (mode & EMIT_FLUSH) {
1473                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1474                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1475                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1476                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1477         }
1478
1479         if (mode & EMIT_INVALIDATE) {
1480                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1481                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1482                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1483                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1484                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1485                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1486                 flags |= PIPE_CONTROL_QW_WRITE;
1487                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1488
1489                 /*
1490                  * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1491                  * pipe control.
1492                  */
1493                 if (IS_GEN9(request->i915))
1494                         vf_flush_wa = true;
1495
1496                 /* WaForGAMHang:kbl */
1497                 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1498                         dc_flush_wa = true;
1499         }
1500
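        /* Each GFX_OP_PIPE_CONTROL(6) below occupies 6 dwords: the vf w/a
         * adds one extra (empty) pipe control and the DC flush w/a adds one
         * before and one after the main flush, hence the +6 and +12.
         */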
1501         len = 6;
1502
1503         if (vf_flush_wa)
1504                 len += 6;
1505
1506         if (dc_flush_wa)
1507                 len += 12;
1508
1509         ret = intel_ring_begin(request, len);
1510         if (ret)
1511                 return ret;
1512
1513         if (vf_flush_wa) {
1514                 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1515                 intel_ring_emit(ring, 0);
1516                 intel_ring_emit(ring, 0);
1517                 intel_ring_emit(ring, 0);
1518                 intel_ring_emit(ring, 0);
1519                 intel_ring_emit(ring, 0);
1520         }
1521
1522         if (dc_flush_wa) {
1523                 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1524                 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
1525                 intel_ring_emit(ring, 0);
1526                 intel_ring_emit(ring, 0);
1527                 intel_ring_emit(ring, 0);
1528                 intel_ring_emit(ring, 0);
1529         }
1530
1531         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1532         intel_ring_emit(ring, flags);
1533         intel_ring_emit(ring, scratch_addr);
1534         intel_ring_emit(ring, 0);
1535         intel_ring_emit(ring, 0);
1536         intel_ring_emit(ring, 0);
1537
1538         if (dc_flush_wa) {
1539                 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1540                 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
1541                 intel_ring_emit(ring, 0);
1542                 intel_ring_emit(ring, 0);
1543                 intel_ring_emit(ring, 0);
1544                 intel_ring_emit(ring, 0);
1545         }
1546
1547         intel_ring_advance(ring);
1548
1549         return 0;
1550 }
1551
1552 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1553 {
1554         /*
1555          * On BXT A steppings there is a HW coherency issue whereby the
1556          * MI_STORE_DATA_IMM storing the completed request's seqno
1557          * occasionally doesn't invalidate the CPU cache. Work around this by
1558          * clflushing the corresponding cacheline whenever the caller wants
1559          * the coherency to be guaranteed. Note that this cacheline is known
1560          * to be clean at this point, since we only write it in
1561          * bxt_a_set_seqno(), where we also do a clflush after the write. So
1562          * this clflush in practice becomes an invalidate operation.
1563          */
1564         intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1565 }
1566
1567 /*
1568  * Reserve space for 2 NOOPs at the end of each request to be
1569  * used as a workaround for not being allowed to do lite
1570  * restore with HEAD==TAIL (WaIdleLiteRestore).
1571  */
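/*
 * The breadcrumb emitted by gen8_emit_request() below therefore ends up
 * looking roughly like:
 *
 *   MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW, seqno HWS address, 0, seqno,
 *   MI_USER_INTERRUPT, MI_NOOP,
 *   <WA_TAIL_DWORDS of MI_NOOP padding>
 */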
1572
1573 static int gen8_emit_request(struct drm_i915_gem_request *request)
1574 {
1575         struct intel_ring *ring = request->ring;
1576         int ret;
1577
1578         ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
1579         if (ret)
1580                 return ret;
1581
1582         /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1583         BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1584
1585         intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1586         intel_ring_emit(ring,
1587                         intel_hws_seqno_address(request->engine) |
1588                         MI_FLUSH_DW_USE_GTT);
1589         intel_ring_emit(ring, 0);
1590         intel_ring_emit(ring, request->fence.seqno);
1591         intel_ring_emit(ring, MI_USER_INTERRUPT);
1592         intel_ring_emit(ring, MI_NOOP);
1593         return intel_logical_ring_advance(request);
1594 }
1595
1596 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1597 {
1598         struct intel_ring *ring = request->ring;
1599         int ret;
1600
1601         ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
1602         if (ret)
1603                 return ret;
1604
1605         /* We're using qword write, seqno should be aligned to 8 bytes. */
1606         BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1607
1608         /* w/a: post-sync ops following a GPGPU operation need a
1609          * prior CS_STALL, which is emitted by the flush
1610          * following the batch.
1611          */
1612         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1613         intel_ring_emit(ring,
1614                         (PIPE_CONTROL_GLOBAL_GTT_IVB |
1615                          PIPE_CONTROL_CS_STALL |
1616                          PIPE_CONTROL_QW_WRITE));
1617         intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
1618         intel_ring_emit(ring, 0);
1619         intel_ring_emit(ring, i915_gem_request_get_seqno(request));
1620         /* We're trashing one dword of HWS. */
1621         intel_ring_emit(ring, 0);
1622         intel_ring_emit(ring, MI_USER_INTERRUPT);
1623         intel_ring_emit(ring, MI_NOOP);
1624         return intel_logical_ring_advance(request);
1625 }
1626
1627 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1628 {
1629         int ret;
1630
1631         ret = intel_logical_ring_workarounds_emit(req);
1632         if (ret)
1633                 return ret;
1634
1635         ret = intel_rcs_context_init_mocs(req);
1636         /*
1637          * Failing to program the MOCS is non-fatal. The system will not
1638          * run at peak performance. So generate an error and carry on.
1639          */
1640         if (ret)
1641                 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1642
1643         return i915_gem_render_state_init(req);
1644 }
1645
1646 /**
1647  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1648  * @engine: Engine Command Streamer.
1649  */
1650 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1651 {
1652         struct drm_i915_private *dev_priv;
1653
1654         if (!intel_engine_initialized(engine))
1655                 return;
1656
1657         /*
1658          * The tasklet cannot be active at this point due to
1659          * intel_mark_active/idle, so this is just for documentation.
1660          */
1661         if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1662                 tasklet_kill(&engine->irq_tasklet);
1663
1664         dev_priv = engine->i915;
1665
1666         if (engine->buffer) {
1667                 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1668         }
1669
1670         if (engine->cleanup)
1671                 engine->cleanup(engine);
1672
1673         intel_engine_cleanup_common(engine);
1674
1675         if (engine->status_page.vma) {
1676                 i915_gem_object_unpin_map(engine->status_page.vma->obj);
1677                 engine->status_page.vma = NULL;
1678         }
1679         intel_lr_context_unpin(dev_priv->kernel_context, engine);
1680
1681         lrc_destroy_wa_ctx_obj(engine);
1682         engine->i915 = NULL;
1683 }
1684
1685 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
1686 {
1687         struct intel_engine_cs *engine;
1688
1689         for_each_engine(engine, dev_priv)
1690                 engine->submit_request = execlists_submit_request;
1691 }
1692
1693 static void
1694 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1695 {
1696         /* Default vfuncs which can be overridden by each engine. */
1697         engine->init_hw = gen8_init_common_ring;
1698         engine->reset_hw = reset_common_ring;
1699         engine->emit_flush = gen8_emit_flush;
1700         engine->emit_request = gen8_emit_request;
1701         engine->submit_request = execlists_submit_request;
1702
1703         engine->irq_enable = gen8_logical_ring_enable_irq;
1704         engine->irq_disable = gen8_logical_ring_disable_irq;
1705         engine->emit_bb_start = gen8_emit_bb_start;
1706         if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1707                 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
1708 }
1709
1710 static inline void
1711 logical_ring_default_irqs(struct intel_engine_cs *engine)
1712 {
1713         unsigned shift = engine->irq_shift;
1714         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1715         engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1716 }
1717
1718 static int
1719 lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
1720 {
1721         const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
1722         void *hws;
1723
1724         /* The HWSP is part of the default context object in LRC mode. */
1725         hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1726         if (IS_ERR(hws))
1727                 return PTR_ERR(hws);
1728
1729         engine->status_page.page_addr = hws + hws_offset;
1730         engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
1731         engine->status_page.vma = vma;
1732
1733         return 0;
1734 }
1735
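/*
 * One-time setup shared by all execlists engines: pre-compute the
 * forcewake domains needed for ELSP and the context status registers,
 * set up the context-switch interrupt tasklet, and install the default
 * vfuncs and IRQ masks.
 */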
1736 static void
1737 logical_ring_setup(struct intel_engine_cs *engine)
1738 {
1739         struct drm_i915_private *dev_priv = engine->i915;
1740         enum forcewake_domains fw_domains;
1741
1742         intel_engine_setup_common(engine);
1743
1744         /* Intentionally left blank. */
1745         engine->buffer = NULL;
1746
1747         fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1748                                                     RING_ELSP(engine),
1749                                                     FW_REG_WRITE);
1750
1751         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1752                                                      RING_CONTEXT_STATUS_PTR(engine),
1753                                                      FW_REG_READ | FW_REG_WRITE);
1754
1755         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1756                                                      RING_CONTEXT_STATUS_BUF_BASE(engine),
1757                                                      FW_REG_READ);
1758
1759         engine->fw_domains = fw_domains;
1760
1761         tasklet_init(&engine->irq_tasklet,
1762                      intel_lrc_irq_handler, (unsigned long)engine);
1763
1764         logical_ring_init_platform_invariants(engine);
1765         logical_ring_default_vfuncs(engine);
1766         logical_ring_default_irqs(engine);
1767 }
1768
1769 static int
1770 logical_ring_init(struct intel_engine_cs *engine)
1771 {
1772         struct i915_gem_context *dctx = engine->i915->kernel_context;
1773         int ret;
1774
1775         ret = intel_engine_init_common(engine);
1776         if (ret)
1777                 goto error;
1778
1779         ret = execlists_context_deferred_alloc(dctx, engine);
1780         if (ret)
1781                 goto error;
1782
1783         /* As this is the default context, always pin it */
1784         ret = intel_lr_context_pin(dctx, engine);
1785         if (ret) {
1786                 DRM_ERROR("Failed to pin context for %s: %d\n",
1787                           engine->name, ret);
1788                 goto error;
1789         }
1790
1791         /* And setup the hardware status page. */
1792         ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
1793         if (ret) {
1794                 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
1795                 goto error;
1796         }
1797
1798         return 0;
1799
1800 error:
1801         intel_logical_ring_cleanup(engine);
1802         return ret;
1803 }
1804
1805 int logical_render_ring_init(struct intel_engine_cs *engine)
1806 {
1807         struct drm_i915_private *dev_priv = engine->i915;
1808         int ret;
1809
1810         logical_ring_setup(engine);
1811
1812         if (HAS_L3_DPF(dev_priv))
1813                 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1814
1815         /* Override some for render ring. */
1816         if (INTEL_GEN(dev_priv) >= 9)
1817                 engine->init_hw = gen9_init_render_ring;
1818         else
1819                 engine->init_hw = gen8_init_render_ring;
1820         engine->init_context = gen8_init_rcs_context;
1821         engine->emit_flush = gen8_emit_flush_render;
1822         engine->emit_request = gen8_emit_request_render;
1823
1824         ret = intel_engine_create_scratch(engine, 4096);
1825         if (ret)
1826                 return ret;
1827
1828         ret = intel_init_workaround_bb(engine);
1829         if (ret) {
1830                 /*
1831                  * We continue even if we fail to initialize the WA batch,
1832                  * because we only expect rare glitches and nothing
1833                  * critical that would prevent us from using the GPU.
1834                  */
1835                 DRM_ERROR("WA batch buffer initialization failed: %d\n",
1836                           ret);
1837         }
1838
1839         ret = logical_ring_init(engine);
1840         if (ret) {
1841                 lrc_destroy_wa_ctx_obj(engine);
1842         }
1843
1844         return ret;
1845 }
1846
1847 int logical_xcs_ring_init(struct intel_engine_cs *engine)
1848 {
1849         logical_ring_setup(engine);
1850
1851         return logical_ring_init(engine);
1852 }
1853
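/*
 * make_rpcs() builds the R_PWR_CLK_STATE value written into the render
 * context (see CTX_R_PWR_CLK_STATE below). As a rough example, a
 * hypothetical part with both slice and subslice power gating, 1 slice
 * and 3 subslices enabled would request something like:
 *
 *   GEN8_RPCS_ENABLE |
 *   GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT) |
 *   GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT)
 */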
1854 static u32
1855 make_rpcs(struct drm_i915_private *dev_priv)
1856 {
1857         u32 rpcs = 0;
1858
1859         /*
1860          * No explicit RPCS request is needed to ensure full
1861          * slice/subslice/EU enablement prior to Gen9.
1862          */
1863         if (INTEL_GEN(dev_priv) < 9)
1864                 return 0;
1865
1866         /*
1867          * Starting in Gen9, render power gating can leave
1868          * slice/subslice/EU in a partially enabled state. We
1869          * must make an explicit request through RPCS for full
1870          * enablement.
1871          */
1872         if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
1873                 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
1874                 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
1875                         GEN8_RPCS_S_CNT_SHIFT;
1876                 rpcs |= GEN8_RPCS_ENABLE;
1877         }
1878
1879         if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
1880                 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
1881                 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
1882                         GEN8_RPCS_SS_CNT_SHIFT;
1883                 rpcs |= GEN8_RPCS_ENABLE;
1884         }
1885
1886         if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
1887                 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1888                         GEN8_RPCS_EU_MIN_SHIFT;
1889                 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1890                         GEN8_RPCS_EU_MAX_SHIFT;
1891                 rpcs |= GEN8_RPCS_ENABLE;
1892         }
1893
1894         return rpcs;
1895 }
1896
1897 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
1898 {
1899         u32 indirect_ctx_offset;
1900
1901         switch (INTEL_GEN(engine->i915)) {
1902         default:
1903                 MISSING_CASE(INTEL_GEN(engine->i915));
1904                 /* fall through */
1905         case 9:
1906                 indirect_ctx_offset =
1907                         GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1908                 break;
1909         case 8:
1910                 indirect_ctx_offset =
1911                         GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1912                 break;
1913         }
1914
1915         return indirect_ctx_offset;
1916 }
1917
1918 static void execlists_init_reg_state(u32 *reg_state,
1919                                      struct i915_gem_context *ctx,
1920                                      struct intel_engine_cs *engine,
1921                                      struct intel_ring *ring)
1922 {
1923         struct drm_i915_private *dev_priv = engine->i915;
1924         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
1925
1926         /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1927          * commands followed by (reg, value) pairs. The values we are setting here are
1928          * only for the first context restore: on a subsequent save, the GPU will
1929          * recreate this batchbuffer with new values (including all the missing
1930          * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
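        /* In other words, the start of the state page ends up roughly as:
         *
         *   MI_LOAD_REGISTER_IMM(N) | MI_LRI_FORCE_POSTED,
         *   RING_CONTEXT_CONTROL,  <masked enable bits>,
         *   RING_HEAD,  0,
         *   RING_TAIL,  0,
         *   RING_START, 0,
         *   ...
         */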
1931         reg_state[CTX_LRI_HEADER_0] =
1932                 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
1933         ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
1934                        RING_CONTEXT_CONTROL(engine),
1935                        _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
1936                                           CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
1937                                           (HAS_RESOURCE_STREAMER(dev_priv) ?
1938                                            CTX_CTRL_RS_CTX_ENABLE : 0)));
1939         ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
1940                        0);
1941         ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
1942                        0);
1943         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
1944                        RING_START(engine->mmio_base), 0);
1945         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
1946                        RING_CTL(engine->mmio_base),
1947                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
1948         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
1949                        RING_BBADDR_UDW(engine->mmio_base), 0);
1950         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
1951                        RING_BBADDR(engine->mmio_base), 0);
1952         ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
1953                        RING_BBSTATE(engine->mmio_base),
1954                        RING_BB_PPGTT);
1955         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
1956                        RING_SBBADDR_UDW(engine->mmio_base), 0);
1957         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
1958                        RING_SBBADDR(engine->mmio_base), 0);
1959         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
1960                        RING_SBBSTATE(engine->mmio_base), 0);
1961         if (engine->id == RCS) {
1962                 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
1963                                RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
1964                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
1965                                RING_INDIRECT_CTX(engine->mmio_base), 0);
1966                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
1967                                RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
1968                 if (engine->wa_ctx.vma) {
1969                         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1970                         u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
1971
1972                         reg_state[CTX_RCS_INDIRECT_CTX+1] =
1973                                 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
1974                                 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
1975
1976                         reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
1977                                 intel_lr_indirect_ctx_offset(engine) << 6;
1978
1979                         reg_state[CTX_BB_PER_CTX_PTR+1] =
1980                                 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
1981                                 0x01;
1982                 }
1983         }
1984         reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
1985         ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
1986                        RING_CTX_TIMESTAMP(engine->mmio_base), 0);
1987         /* PDP values will be assigned later if needed */
1988         ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
1989                        0);
1990         ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
1991                        0);
1992         ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
1993                        0);
1994         ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
1995                        0);
1996         ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
1997                        0);
1998         ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
1999                        0);
2000         ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2001                        0);
2002         ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2003                        0);
2004
2005         if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2006                 /* 64b PPGTT (48bit canonical)
2007                  * PDP0_DESCRIPTOR contains the base address to PML4 and
2008                  * other PDP Descriptors are ignored.
2009                  */
2010                 ASSIGN_CTX_PML4(ppgtt, reg_state);
2011         } else {
2012                 /* 32b PPGTT
2013                  * PDP*_DESCRIPTOR contains the base address of space supported.
2014                  * With dynamic page allocation, PDPs may not be allocated at
2015                  * this point. Point the unallocated PDPs to the scratch page
2016                  */
2017                 execlists_update_context_pdps(ppgtt, reg_state);
2018         }
2019
2020         if (engine->id == RCS) {
2021                 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2022                 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2023                                make_rpcs(dev_priv));
2024         }
2025 }
2026
2027 static int
2028 populate_lr_context(struct i915_gem_context *ctx,
2029                     struct drm_i915_gem_object *ctx_obj,
2030                     struct intel_engine_cs *engine,
2031                     struct intel_ring *ring)
2032 {
2033         void *vaddr;
2034         int ret;
2035
2036         ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2037         if (ret) {
2038                 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2039                 return ret;
2040         }
2041
2042         vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
2043         if (IS_ERR(vaddr)) {
2044                 ret = PTR_ERR(vaddr);
2045                 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2046                 return ret;
2047         }
2048         ctx_obj->dirty = true;
2049
2050         /* The second page of the context object contains some fields which must
2051          * be set up prior to the first execution. */
2052
2053         execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
2054                                  ctx, engine, ring);
2055
2056         i915_gem_object_unpin_map(ctx_obj);
2057
2058         return 0;
2059 }
2060
2061 /**
2062  * intel_lr_context_size() - return the size of the context for an engine
2063  * @engine: which engine to find the context size for
2064  *
2065  * Each engine may require a different amount of space for a context image,
2066  * so when allocating (or copying) an image, this function can be used to
2067  * find the right size for the specific engine.
2068  *
2069  * Return: size (in bytes) of an engine-specific context image
2070  *
2071  * Note: this size includes the HWSP, which is part of the context image
2072  * in LRC mode, but does not include the "shared data page" used with
2073  * GuC submission. The caller should account for this if using the GuC.
2074  */
2075 uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2076 {
2077         int ret = 0;
2078
2079         WARN_ON(INTEL_GEN(engine->i915) < 8);
2080
2081         switch (engine->id) {
2082         case RCS:
2083                 if (INTEL_GEN(engine->i915) >= 9)
2084                         ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2085                 else
2086                         ret = GEN8_LR_CONTEXT_RENDER_SIZE;
2087                 break;
2088         case VCS:
2089         case BCS:
2090         case VECS:
2091         case VCS2:
2092                 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2093                 break;
2094         }
2095
2096         return ret;
2097 }
2098
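/*
 * Lazily allocate the per-engine bits of a context the first time it is
 * used on a given engine: a backing object sized by intel_lr_context_size()
 * plus one extra shared-data page for the GuC, a GGTT vma for it, and a
 * ring of ctx->ring_size bytes, all wired together by populate_lr_context().
 */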
2099 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2100                                             struct intel_engine_cs *engine)
2101 {
2102         struct drm_i915_gem_object *ctx_obj;
2103         struct intel_context *ce = &ctx->engine[engine->id];
2104         struct i915_vma *vma;
2105         uint32_t context_size;
2106         struct intel_ring *ring;
2107         int ret;
2108
2109         WARN_ON(ce->state);
2110
2111         context_size = round_up(intel_lr_context_size(engine), 4096);
2112
2113         /* One extra page as the shared data page between the driver and GuC */
2114         context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2115
2116         ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
2117         if (IS_ERR(ctx_obj)) {
2118                 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2119                 return PTR_ERR(ctx_obj);
2120         }
2121
2122         vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
2123         if (IS_ERR(vma)) {
2124                 ret = PTR_ERR(vma);
2125                 goto error_deref_obj;
2126         }
2127
2128         ring = intel_engine_create_ring(engine, ctx->ring_size);
2129         if (IS_ERR(ring)) {
2130                 ret = PTR_ERR(ring);
2131                 goto error_deref_obj;
2132         }
2133
2134         ret = populate_lr_context(ctx, ctx_obj, engine, ring);
2135         if (ret) {
2136                 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2137                 goto error_ring_free;
2138         }
2139
2140         ce->ring = ring;
2141         ce->state = vma;
2142         ce->initialised = engine->init_context == NULL;
2143
2144         return 0;
2145
2146 error_ring_free:
2147         intel_ring_free(ring);
2148 error_deref_obj:
2149         i915_gem_object_put(ctx_obj);
2150         return ret;
2151 }
2152
2153 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2154 {
2155         struct intel_engine_cs *engine;
2156         struct i915_gem_context *ctx;
2157
2158         /* Because we emit WA_TAIL_DWORDS of padding, there may be a
2159          * disparity between our bookkeeping in ce->ring->head and
2160          * ce->ring->tail and the values stored in the context image. As
2161          * we only write new commands from ce->ring->tail onwards,
2162          * everything before that is junk. If the GPU starts reading from
2163          * the RING_HEAD stored in the context, it may try to
2164          *
2165          * So to avoid that we reset the context images upon resume. For
2166          * simplicity, we just zero everything out.
2167          */
2168         list_for_each_entry(ctx, &dev_priv->context_list, link) {
2169                 for_each_engine(engine, dev_priv) {
2170                         struct intel_context *ce = &ctx->engine[engine->id];
2171                         u32 *reg;
2172
2173                         if (!ce->state)
2174                                 continue;
2175
2176                         reg = i915_gem_object_pin_map(ce->state->obj,
2177                                                       I915_MAP_WB);
2178                         if (WARN_ON(IS_ERR(reg)))
2179                                 continue;
2180
2181                         reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
2182                         reg[CTX_RING_HEAD+1] = 0;
2183                         reg[CTX_RING_TAIL+1] = 0;
2184
2185                         ce->state->obj->dirty = true;
2186                         i915_gem_object_unpin_map(ce->state->obj);
2187
2188                         ce->ring->head = ce->ring->tail = 0;
2189                         ce->ring->last_retired_head = -1;
2190                         intel_ring_update_space(ce->ring);
2191                 }
2192         }
2193 }