2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
49 static const char *yesno(int v)
51 return v ? "yes" : "no";
54 /* As the drm_debugfs_init() routines are called before dev->dev_private is
55  * allocated, we need to hook into the minor for release. */
57 drm_add_fake_info_node(struct drm_minor *minor,
61 struct drm_info_node *node;
63 node = kmalloc(sizeof(*node), GFP_KERNEL);
71 node->info_ent = (void *) key;
73 mutex_lock(&minor->debugfs_lock);
74 list_add(&node->list, &minor->debugfs_list);
75 mutex_unlock(&minor->debugfs_lock);
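/*
 * debugfs: print the device generation, PCH type and each DEV_INFO
 * feature flag as yes/no.
 */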
80 static int i915_capabilities(struct seq_file *m, void *data)
82 struct drm_info_node *node = m->private;
83 struct drm_device *dev = node->minor->dev;
84 const struct intel_device_info *info = INTEL_INFO(dev);
86 seq_printf(m, "gen: %d\n", info->gen);
87 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
88 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
89 #define SEP_SEMICOLON ;
90 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
97 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
99 if (obj->user_pin_count > 0)
101 else if (i915_gem_obj_is_pinned(obj))
107 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
109 switch (obj->tiling_mode) {
111 case I915_TILING_NONE: return " ";
112 case I915_TILING_X: return "X";
113 case I915_TILING_Y: return "Y";
117 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
119 return obj->has_global_gtt_mapping ? "g" : " ";
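/*
 * Emit a one-line summary of a GEM object: pin/tiling/global flags, size,
 * domains, last seqnos and cache level, followed by per-VMA binding, fence,
 * stolen and ring details.
 */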
123 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
125 struct i915_vma *vma;
128 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
131 get_tiling_flag(obj),
132 get_global_flag(obj),
133 obj->base.size / 1024,
134 obj->base.read_domains,
135 obj->base.write_domain,
136 obj->last_read_seqno,
137 obj->last_write_seqno,
138 obj->last_fenced_seqno,
139 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
140 obj->dirty ? " dirty" : "",
141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
143 seq_printf(m, " (name: %d)", obj->base.name);
144 list_for_each_entry(vma, &obj->vma_list, vma_link) {
145 if (vma->pin_count > 0)
148 seq_printf(m, " (pinned x %d)", pin_count);
149 if (obj->pin_display)
150 seq_printf(m, " (display)");
151 if (obj->fence_reg != I915_FENCE_REG_NONE)
152 seq_printf(m, " (fence: %d)", obj->fence_reg);
153 list_for_each_entry(vma, &obj->vma_list, vma_link) {
154 if (!i915_is_ggtt(vma->vm))
158 seq_printf(m, "gtt offset: %08lx, size: %08lx)",
159 vma->node.start, vma->node.size);
162 seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
163 if (obj->pin_mappable || obj->fault_mappable) {
165 if (obj->pin_mappable)
167 if (obj->fault_mappable)
170 seq_printf(m, " (%s mappable)", s);
172 if (obj->ring != NULL)
173 seq_printf(m, " (%s)", obj->ring->name);
174 if (obj->frontbuffer_bits)
175 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
178 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
180 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
181 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
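/*
 * debugfs: walk the global GTT active or inactive list (selected via
 * info_ent->data), describe each object and print running totals.
 */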
185 static int i915_gem_object_list_info(struct seq_file *m, void *data)
187 struct drm_info_node *node = m->private;
188 uintptr_t list = (uintptr_t) node->info_ent->data;
189 struct list_head *head;
190 struct drm_device *dev = node->minor->dev;
191 struct drm_i915_private *dev_priv = dev->dev_private;
192 struct i915_address_space *vm = &dev_priv->gtt.base;
193 struct i915_vma *vma;
194 size_t total_obj_size, total_gtt_size;
197 ret = mutex_lock_interruptible(&dev->struct_mutex);
201 /* FIXME: the user of this interface might want more than just GGTT */
204 seq_puts(m, "Active:\n");
205 head = &vm->active_list;
208 seq_puts(m, "Inactive:\n");
209 head = &vm->inactive_list;
212 mutex_unlock(&dev->struct_mutex);
216 total_obj_size = total_gtt_size = count = 0;
217 list_for_each_entry(vma, head, mm_list) {
219 describe_obj(m, vma->obj);
221 total_obj_size += vma->obj->base.size;
222 total_gtt_size += vma->node.size;
225 mutex_unlock(&dev->struct_mutex);
227 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
228 count, total_obj_size, total_gtt_size);
232 static int obj_rank_by_stolen(void *priv,
233 struct list_head *A, struct list_head *B)
235 struct drm_i915_gem_object *a =
236 container_of(A, struct drm_i915_gem_object, obj_exec_link);
237 struct drm_i915_gem_object *b =
238 container_of(B, struct drm_i915_gem_object, obj_exec_link);
240 return a->stolen->start - b->stolen->start;
243 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
245 struct drm_info_node *node = m->private;
246 struct drm_device *dev = node->minor->dev;
247 struct drm_i915_private *dev_priv = dev->dev_private;
248 struct drm_i915_gem_object *obj;
249 size_t total_obj_size, total_gtt_size;
253 ret = mutex_lock_interruptible(&dev->struct_mutex);
257 total_obj_size = total_gtt_size = count = 0;
258 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
259 if (obj->stolen == NULL)
262 list_add(&obj->obj_exec_link, &stolen);
264 total_obj_size += obj->base.size;
265 total_gtt_size += i915_gem_obj_ggtt_size(obj);
268 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
269 if (obj->stolen == NULL)
272 list_add(&obj->obj_exec_link, &stolen);
274 total_obj_size += obj->base.size;
277 list_sort(NULL, &stolen, obj_rank_by_stolen);
278 seq_puts(m, "Stolen:\n");
279 while (!list_empty(&stolen)) {
280 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
282 describe_obj(m, obj);
284 list_del_init(&obj->obj_exec_link);
286 mutex_unlock(&dev->struct_mutex);
288 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
289 count, total_obj_size, total_gtt_size);
293 #define count_objects(list, member) do { \
294 list_for_each_entry(obj, list, member) { \
295 size += i915_gem_obj_ggtt_size(obj); \
297 if (obj->map_and_fenceable) { \
298 mappable_size += i915_gem_obj_ggtt_size(obj); \
305 struct drm_i915_file_private *file_priv;
307 size_t total, unbound;
308 size_t global, shared;
309 size_t active, inactive;
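/* idr callback: accumulate per-client memory statistics into a struct file_stats. */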
312 static int per_file_stats(int id, void *ptr, void *data)
314 struct drm_i915_gem_object *obj = ptr;
315 struct file_stats *stats = data;
316 struct i915_vma *vma;
319 stats->total += obj->base.size;
321 if (obj->base.name || obj->base.dma_buf)
322 stats->shared += obj->base.size;
324 if (USES_FULL_PPGTT(obj->base.dev)) {
325 list_for_each_entry(vma, &obj->vma_list, vma_link) {
326 struct i915_hw_ppgtt *ppgtt;
328 if (!drm_mm_node_allocated(&vma->node))
331 if (i915_is_ggtt(vma->vm)) {
332 stats->global += obj->base.size;
336 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
337 if (ppgtt->file_priv != stats->file_priv)
340 if (obj->ring) /* XXX per-vma statistic */
341 stats->active += obj->base.size;
343 stats->inactive += obj->base.size;
348 if (i915_gem_obj_ggtt_bound(obj)) {
349 stats->global += obj->base.size;
351 stats->active += obj->base.size;
353 stats->inactive += obj->base.size;
358 if (!list_empty(&obj->global_list))
359 stats->unbound += obj->base.size;
364 #define count_vmas(list, member) do { \
365 list_for_each_entry(vma, list, member) { \
366 size += i915_gem_obj_ggtt_size(vma->obj); \
368 if (vma->obj->map_and_fenceable) { \
369 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
375 static int i915_gem_object_info(struct seq_file *m, void* data)
377 struct drm_info_node *node = m->private;
378 struct drm_device *dev = node->minor->dev;
379 struct drm_i915_private *dev_priv = dev->dev_private;
380 u32 count, mappable_count, purgeable_count;
381 size_t size, mappable_size, purgeable_size;
382 struct drm_i915_gem_object *obj;
383 struct i915_address_space *vm = &dev_priv->gtt.base;
384 struct drm_file *file;
385 struct i915_vma *vma;
388 ret = mutex_lock_interruptible(&dev->struct_mutex);
392 seq_printf(m, "%u objects, %zu bytes\n",
393 dev_priv->mm.object_count,
394 dev_priv->mm.object_memory);
396 size = count = mappable_size = mappable_count = 0;
397 count_objects(&dev_priv->mm.bound_list, global_list);
398 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
399 count, mappable_count, size, mappable_size);
401 size = count = mappable_size = mappable_count = 0;
402 count_vmas(&vm->active_list, mm_list);
403 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
404 count, mappable_count, size, mappable_size);
406 size = count = mappable_size = mappable_count = 0;
407 count_vmas(&vm->inactive_list, mm_list);
408 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
409 count, mappable_count, size, mappable_size);
411 size = count = purgeable_size = purgeable_count = 0;
412 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
413 size += obj->base.size, ++count;
414 if (obj->madv == I915_MADV_DONTNEED)
415 purgeable_size += obj->base.size, ++purgeable_count;
417 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
419 size = count = mappable_size = mappable_count = 0;
420 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
421 if (obj->fault_mappable) {
422 size += i915_gem_obj_ggtt_size(obj);
425 if (obj->pin_mappable) {
426 mappable_size += i915_gem_obj_ggtt_size(obj);
429 if (obj->madv == I915_MADV_DONTNEED) {
430 purgeable_size += obj->base.size;
434 seq_printf(m, "%u purgeable objects, %zu bytes\n",
435 purgeable_count, purgeable_size);
436 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
437 mappable_count, mappable_size);
438 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
441 seq_printf(m, "%zu [%lu] gtt total\n",
442 dev_priv->gtt.base.total,
443 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
446 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
447 struct file_stats stats;
448 struct task_struct *task;
450 memset(&stats, 0, sizeof(stats));
451 stats.file_priv = file->driver_priv;
452 spin_lock(&file->table_lock);
453 idr_for_each(&file->object_idr, per_file_stats, &stats);
454 spin_unlock(&file->table_lock);
456 * Although we have a valid reference on file->pid, that does
457 * not guarantee that the task that called get_pid() is
458 * still alive (e.g. get_pid(current) => fork() => exit()).
459 * Therefore, we need to protect this ->comm access using RCU.
462 task = pid_task(file->pid, PIDTYPE_PID);
463 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
464 task ? task->comm : "<unknown>",
475 mutex_unlock(&dev->struct_mutex);
480 static int i915_gem_gtt_info(struct seq_file *m, void *data)
482 struct drm_info_node *node = m->private;
483 struct drm_device *dev = node->minor->dev;
484 uintptr_t list = (uintptr_t) node->info_ent->data;
485 struct drm_i915_private *dev_priv = dev->dev_private;
486 struct drm_i915_gem_object *obj;
487 size_t total_obj_size, total_gtt_size;
490 ret = mutex_lock_interruptible(&dev->struct_mutex);
494 total_obj_size = total_gtt_size = count = 0;
495 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
496 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
500 describe_obj(m, obj);
502 total_obj_size += obj->base.size;
503 total_gtt_size += i915_gem_obj_ggtt_size(obj);
507 mutex_unlock(&dev->struct_mutex);
509 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
510 count, total_obj_size, total_gtt_size);
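/*
 * debugfs: report any page flip still pending on each CRTC, including the
 * ring, seqnos, vblank counts and scanout addresses involved.
 */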
515 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
517 struct drm_info_node *node = m->private;
518 struct drm_device *dev = node->minor->dev;
519 struct drm_i915_private *dev_priv = dev->dev_private;
521 struct intel_crtc *crtc;
524 ret = mutex_lock_interruptible(&dev->struct_mutex);
528 for_each_intel_crtc(dev, crtc) {
529 const char pipe = pipe_name(crtc->pipe);
530 const char plane = plane_name(crtc->plane);
531 struct intel_unpin_work *work;
533 spin_lock_irqsave(&dev->event_lock, flags);
534 work = crtc->unpin_work;
536 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
541 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
542 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
545 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
548 if (work->flip_queued_ring) {
549 seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
550 work->flip_queued_ring->name,
551 work->flip_queued_seqno,
552 dev_priv->next_seqno,
553 work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
554 i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
555 work->flip_queued_seqno));
557 seq_puts(m, "Flip not associated with any ring\n");
558 seq_printf(m, "Flip queued on frame %d (was ready on frame %d), now %d\n",
559 work->flip_queued_vblank,
560 work->flip_ready_vblank,
561 drm_vblank_count(dev, crtc->pipe));
562 if (work->enable_stall_check)
563 seq_puts(m, "Stall check enabled, ");
565 seq_puts(m, "Stall check waiting for page flip ioctl, ");
566 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
568 if (INTEL_INFO(dev)->gen >= 4)
569 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
571 addr = I915_READ(DSPADDR(crtc->plane));
572 seq_printf(m, "Current scanout address 0x%08x\n", addr);
574 if (work->pending_flip_obj) {
575 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
576 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
579 spin_unlock_irqrestore(&dev->event_lock, flags);
582 mutex_unlock(&dev->struct_mutex);
587 static int i915_gem_request_info(struct seq_file *m, void *data)
589 struct drm_info_node *node = m->private;
590 struct drm_device *dev = node->minor->dev;
591 struct drm_i915_private *dev_priv = dev->dev_private;
592 struct intel_engine_cs *ring;
593 struct drm_i915_gem_request *gem_request;
596 ret = mutex_lock_interruptible(&dev->struct_mutex);
601 for_each_ring(ring, dev_priv, i) {
602 if (list_empty(&ring->request_list))
605 seq_printf(m, "%s requests:\n", ring->name);
606 list_for_each_entry(gem_request,
609 seq_printf(m, " %d @ %d\n",
611 (int) (jiffies - gem_request->emitted_jiffies));
615 mutex_unlock(&dev->struct_mutex);
618 seq_puts(m, "No requests\n");
623 static void i915_ring_seqno_info(struct seq_file *m,
624 struct intel_engine_cs *ring)
626 if (ring->get_seqno) {
627 seq_printf(m, "Current sequence (%s): %u\n",
628 ring->name, ring->get_seqno(ring, false));
632 static int i915_gem_seqno_info(struct seq_file *m, void *data)
634 struct drm_info_node *node = m->private;
635 struct drm_device *dev = node->minor->dev;
636 struct drm_i915_private *dev_priv = dev->dev_private;
637 struct intel_engine_cs *ring;
640 ret = mutex_lock_interruptible(&dev->struct_mutex);
643 intel_runtime_pm_get(dev_priv);
645 for_each_ring(ring, dev_priv, i)
646 i915_ring_seqno_info(m, ring);
648 intel_runtime_pm_put(dev_priv);
649 mutex_unlock(&dev->struct_mutex);
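/*
 * debugfs: dump the interrupt enable/identity/mask registers appropriate to
 * the platform (CHV, gen8+, VLV, pre-PCH-split or PCH-split), plus per-ring
 * IMR and seqno state.
 */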
655 static int i915_interrupt_info(struct seq_file *m, void *data)
657 struct drm_info_node *node = m->private;
658 struct drm_device *dev = node->minor->dev;
659 struct drm_i915_private *dev_priv = dev->dev_private;
660 struct intel_engine_cs *ring;
663 ret = mutex_lock_interruptible(&dev->struct_mutex);
666 intel_runtime_pm_get(dev_priv);
668 if (IS_CHERRYVIEW(dev)) {
669 seq_printf(m, "Master Interrupt Control:\t%08x\n",
670 I915_READ(GEN8_MASTER_IRQ));
672 seq_printf(m, "Display IER:\t%08x\n",
674 seq_printf(m, "Display IIR:\t%08x\n",
676 seq_printf(m, "Display IIR_RW:\t%08x\n",
677 I915_READ(VLV_IIR_RW));
678 seq_printf(m, "Display IMR:\t%08x\n",
680 for_each_pipe(dev_priv, pipe)
681 seq_printf(m, "Pipe %c stat:\t%08x\n",
683 I915_READ(PIPESTAT(pipe)));
685 seq_printf(m, "Port hotplug:\t%08x\n",
686 I915_READ(PORT_HOTPLUG_EN));
687 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
688 I915_READ(VLV_DPFLIPSTAT));
689 seq_printf(m, "DPINVGTT:\t%08x\n",
690 I915_READ(DPINVGTT));
692 for (i = 0; i < 4; i++) {
693 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
694 i, I915_READ(GEN8_GT_IMR(i)));
695 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
696 i, I915_READ(GEN8_GT_IIR(i)));
697 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
698 i, I915_READ(GEN8_GT_IER(i)));
701 seq_printf(m, "PCU interrupt mask:\t%08x\n",
702 I915_READ(GEN8_PCU_IMR));
703 seq_printf(m, "PCU interrupt identity:\t%08x\n",
704 I915_READ(GEN8_PCU_IIR));
705 seq_printf(m, "PCU interrupt enable:\t%08x\n",
706 I915_READ(GEN8_PCU_IER));
707 } else if (INTEL_INFO(dev)->gen >= 8) {
708 seq_printf(m, "Master Interrupt Control:\t%08x\n",
709 I915_READ(GEN8_MASTER_IRQ));
711 for (i = 0; i < 4; i++) {
712 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
713 i, I915_READ(GEN8_GT_IMR(i)));
714 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
715 i, I915_READ(GEN8_GT_IIR(i)));
716 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
717 i, I915_READ(GEN8_GT_IER(i)));
720 for_each_pipe(dev_priv, pipe) {
721 if (!intel_display_power_enabled(dev_priv,
722 POWER_DOMAIN_PIPE(pipe))) {
723 seq_printf(m, "Pipe %c power disabled\n",
727 seq_printf(m, "Pipe %c IMR:\t%08x\n",
729 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
730 seq_printf(m, "Pipe %c IIR:\t%08x\n",
732 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
733 seq_printf(m, "Pipe %c IER:\t%08x\n",
735 I915_READ(GEN8_DE_PIPE_IER(pipe)));
738 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
739 I915_READ(GEN8_DE_PORT_IMR));
740 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
741 I915_READ(GEN8_DE_PORT_IIR));
742 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
743 I915_READ(GEN8_DE_PORT_IER));
745 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
746 I915_READ(GEN8_DE_MISC_IMR));
747 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
748 I915_READ(GEN8_DE_MISC_IIR));
749 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
750 I915_READ(GEN8_DE_MISC_IER));
752 seq_printf(m, "PCU interrupt mask:\t%08x\n",
753 I915_READ(GEN8_PCU_IMR));
754 seq_printf(m, "PCU interrupt identity:\t%08x\n",
755 I915_READ(GEN8_PCU_IIR));
756 seq_printf(m, "PCU interrupt enable:\t%08x\n",
757 I915_READ(GEN8_PCU_IER));
758 } else if (IS_VALLEYVIEW(dev)) {
759 seq_printf(m, "Display IER:\t%08x\n",
761 seq_printf(m, "Display IIR:\t%08x\n",
763 seq_printf(m, "Display IIR_RW:\t%08x\n",
764 I915_READ(VLV_IIR_RW));
765 seq_printf(m, "Display IMR:\t%08x\n",
767 for_each_pipe(dev_priv, pipe)
768 seq_printf(m, "Pipe %c stat:\t%08x\n",
770 I915_READ(PIPESTAT(pipe)));
772 seq_printf(m, "Master IER:\t%08x\n",
773 I915_READ(VLV_MASTER_IER));
775 seq_printf(m, "Render IER:\t%08x\n",
777 seq_printf(m, "Render IIR:\t%08x\n",
779 seq_printf(m, "Render IMR:\t%08x\n",
782 seq_printf(m, "PM IER:\t\t%08x\n",
783 I915_READ(GEN6_PMIER));
784 seq_printf(m, "PM IIR:\t\t%08x\n",
785 I915_READ(GEN6_PMIIR));
786 seq_printf(m, "PM IMR:\t\t%08x\n",
787 I915_READ(GEN6_PMIMR));
789 seq_printf(m, "Port hotplug:\t%08x\n",
790 I915_READ(PORT_HOTPLUG_EN));
791 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
792 I915_READ(VLV_DPFLIPSTAT));
793 seq_printf(m, "DPINVGTT:\t%08x\n",
794 I915_READ(DPINVGTT));
796 } else if (!HAS_PCH_SPLIT(dev)) {
797 seq_printf(m, "Interrupt enable: %08x\n",
799 seq_printf(m, "Interrupt identity: %08x\n",
801 seq_printf(m, "Interrupt mask: %08x\n",
803 for_each_pipe(dev_priv, pipe)
804 seq_printf(m, "Pipe %c stat: %08x\n",
806 I915_READ(PIPESTAT(pipe)));
808 seq_printf(m, "North Display Interrupt enable: %08x\n",
810 seq_printf(m, "North Display Interrupt identity: %08x\n",
812 seq_printf(m, "North Display Interrupt mask: %08x\n",
814 seq_printf(m, "South Display Interrupt enable: %08x\n",
816 seq_printf(m, "South Display Interrupt identity: %08x\n",
818 seq_printf(m, "South Display Interrupt mask: %08x\n",
820 seq_printf(m, "Graphics Interrupt enable: %08x\n",
822 seq_printf(m, "Graphics Interrupt identity: %08x\n",
824 seq_printf(m, "Graphics Interrupt mask: %08x\n",
827 for_each_ring(ring, dev_priv, i) {
828 if (INTEL_INFO(dev)->gen >= 6) {
830 "Graphics Interrupt mask (%s): %08x\n",
831 ring->name, I915_READ_IMR(ring));
833 i915_ring_seqno_info(m, ring);
835 intel_runtime_pm_put(dev_priv);
836 mutex_unlock(&dev->struct_mutex);
841 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
843 struct drm_info_node *node = m->private;
844 struct drm_device *dev = node->minor->dev;
845 struct drm_i915_private *dev_priv = dev->dev_private;
848 ret = mutex_lock_interruptible(&dev->struct_mutex);
852 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
853 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
854 for (i = 0; i < dev_priv->num_fence_regs; i++) {
855 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
857 seq_printf(m, "Fence %d, pin count = %d, object = ",
858 i, dev_priv->fence_regs[i].pin_count);
860 seq_puts(m, "unused");
862 describe_obj(m, obj);
866 mutex_unlock(&dev->struct_mutex);
870 static int i915_hws_info(struct seq_file *m, void *data)
872 struct drm_info_node *node = m->private;
873 struct drm_device *dev = node->minor->dev;
874 struct drm_i915_private *dev_priv = dev->dev_private;
875 struct intel_engine_cs *ring;
879 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
880 hws = ring->status_page.page_addr;
884 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
885 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
887 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
893 i915_error_state_write(struct file *filp,
894 const char __user *ubuf,
898 struct i915_error_state_file_priv *error_priv = filp->private_data;
899 struct drm_device *dev = error_priv->dev;
902 DRM_DEBUG_DRIVER("Resetting error state\n");
904 ret = mutex_lock_interruptible(&dev->struct_mutex);
908 i915_destroy_error_state(dev);
909 mutex_unlock(&dev->struct_mutex);
914 static int i915_error_state_open(struct inode *inode, struct file *file)
916 struct drm_device *dev = inode->i_private;
917 struct i915_error_state_file_priv *error_priv;
919 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
923 error_priv->dev = dev;
925 i915_error_state_get(dev, error_priv);
927 file->private_data = error_priv;
932 static int i915_error_state_release(struct inode *inode, struct file *file)
934 struct i915_error_state_file_priv *error_priv = file->private_data;
936 i915_error_state_put(error_priv);
942 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
943 size_t count, loff_t *pos)
945 struct i915_error_state_file_priv *error_priv = file->private_data;
946 struct drm_i915_error_state_buf error_str;
948 ssize_t ret_count = 0;
951 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
955 ret = i915_error_state_to_str(&error_str, error_priv);
959 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
966 *pos = error_str.start + ret_count;
968 i915_error_state_buf_release(&error_str);
969 return ret ?: ret_count;
972 static const struct file_operations i915_error_state_fops = {
973 .owner = THIS_MODULE,
974 .open = i915_error_state_open,
975 .read = i915_error_state_read,
976 .write = i915_error_state_write,
977 .llseek = default_llseek,
978 .release = i915_error_state_release,
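/* debugfs attribute: read or set the seqno that will be assigned to the next request. */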
982 i915_next_seqno_get(void *data, u64 *val)
984 struct drm_device *dev = data;
985 struct drm_i915_private *dev_priv = dev->dev_private;
988 ret = mutex_lock_interruptible(&dev->struct_mutex);
992 *val = dev_priv->next_seqno;
993 mutex_unlock(&dev->struct_mutex);
999 i915_next_seqno_set(void *data, u64 val)
1001 struct drm_device *dev = data;
1004 ret = mutex_lock_interruptible(&dev->struct_mutex);
1008 ret = i915_gem_set_seqno(dev, val);
1009 mutex_unlock(&dev->struct_mutex);
1014 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1015 i915_next_seqno_get, i915_next_seqno_set,
1018 static int i915_frequency_info(struct seq_file *m, void *unused)
1020 struct drm_info_node *node = m->private;
1021 struct drm_device *dev = node->minor->dev;
1022 struct drm_i915_private *dev_priv = dev->dev_private;
1025 intel_runtime_pm_get(dev_priv);
1027 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1030 u16 rgvswctl = I915_READ16(MEMSWCTL);
1031 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1033 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1034 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1035 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1037 seq_printf(m, "Current P-state: %d\n",
1038 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1039 } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
1040 IS_BROADWELL(dev)) {
1041 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1042 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1043 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1044 u32 rpmodectl, rpinclimit, rpdeclimit;
1045 u32 rpstat, cagf, reqf;
1046 u32 rpupei, rpcurup, rpprevup;
1047 u32 rpdownei, rpcurdown, rpprevdown;
1048 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1051 /* RPSTAT1 is in the GT power well */
1052 ret = mutex_lock_interruptible(&dev->struct_mutex);
1056 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
1058 reqf = I915_READ(GEN6_RPNSWREQ);
1059 reqf &= ~GEN6_TURBO_DISABLE;
1060 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1064 reqf *= GT_FREQUENCY_MULTIPLIER;
1066 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1067 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1068 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1070 rpstat = I915_READ(GEN6_RPSTAT1);
1071 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1072 rpcurup = I915_READ(GEN6_RP_CUR_UP);
1073 rpprevup = I915_READ(GEN6_RP_PREV_UP);
1074 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1075 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1076 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1077 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1078 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1080 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1081 cagf *= GT_FREQUENCY_MULTIPLIER;
1083 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
1084 mutex_unlock(&dev->struct_mutex);
1086 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1087 pm_ier = I915_READ(GEN6_PMIER);
1088 pm_imr = I915_READ(GEN6_PMIMR);
1089 pm_isr = I915_READ(GEN6_PMISR);
1090 pm_iir = I915_READ(GEN6_PMIIR);
1091 pm_mask = I915_READ(GEN6_PMINTRMSK);
1093 pm_ier = I915_READ(GEN8_GT_IER(2));
1094 pm_imr = I915_READ(GEN8_GT_IMR(2));
1095 pm_isr = I915_READ(GEN8_GT_ISR(2));
1096 pm_iir = I915_READ(GEN8_GT_IIR(2));
1097 pm_mask = I915_READ(GEN6_PMINTRMSK);
1099 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1100 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1101 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1102 seq_printf(m, "Render p-state ratio: %d\n",
1103 (gt_perf_status & 0xff00) >> 8);
1104 seq_printf(m, "Render p-state VID: %d\n",
1105 gt_perf_status & 0xff);
1106 seq_printf(m, "Render p-state limit: %d\n",
1107 rp_state_limits & 0xff);
1108 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1109 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1110 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1111 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1112 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1113 seq_printf(m, "CAGF: %dMHz\n", cagf);
1114 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1115 GEN6_CURICONT_MASK);
1116 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1117 GEN6_CURBSYTAVG_MASK);
1118 seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1119 GEN6_CURBSYTAVG_MASK);
1120 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1122 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1123 GEN6_CURBSYTAVG_MASK);
1124 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1125 GEN6_CURBSYTAVG_MASK);
1127 max_freq = (rp_state_cap & 0xff0000) >> 16;
1128 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1129 max_freq * GT_FREQUENCY_MULTIPLIER);
1131 max_freq = (rp_state_cap & 0xff00) >> 8;
1132 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1133 max_freq * GT_FREQUENCY_MULTIPLIER);
1135 max_freq = rp_state_cap & 0xff;
1136 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1137 max_freq * GT_FREQUENCY_MULTIPLIER);
1139 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1140 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
1141 } else if (IS_VALLEYVIEW(dev)) {
1144 mutex_lock(&dev_priv->rps.hw_lock);
1145 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1146 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1147 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1149 seq_printf(m, "max GPU freq: %d MHz\n",
1150 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1152 seq_printf(m, "min GPU freq: %d MHz\n",
1153 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1155 seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
1156 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1158 seq_printf(m, "current GPU freq: %d MHz\n",
1159 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1160 mutex_unlock(&dev_priv->rps.hw_lock);
1162 seq_puts(m, "no P-state info available\n");
1166 intel_runtime_pm_put(dev_priv);
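/*
 * Render-standby / RC state reporting. i915_drpc_info() below dispatches to
 * the Valleyview, gen6+ or Ironlake variant.
 */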
1170 static int ironlake_drpc_info(struct seq_file *m)
1172 struct drm_info_node *node = m->private;
1173 struct drm_device *dev = node->minor->dev;
1174 struct drm_i915_private *dev_priv = dev->dev_private;
1175 u32 rgvmodectl, rstdbyctl;
1179 ret = mutex_lock_interruptible(&dev->struct_mutex);
1182 intel_runtime_pm_get(dev_priv);
1184 rgvmodectl = I915_READ(MEMMODECTL);
1185 rstdbyctl = I915_READ(RSTDBYCTL);
1186 crstandvid = I915_READ16(CRSTANDVID);
1188 intel_runtime_pm_put(dev_priv);
1189 mutex_unlock(&dev->struct_mutex);
1191 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1193 seq_printf(m, "Boost freq: %d\n",
1194 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1195 MEMMODE_BOOST_FREQ_SHIFT);
1196 seq_printf(m, "HW control enabled: %s\n",
1197 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1198 seq_printf(m, "SW control enabled: %s\n",
1199 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1200 seq_printf(m, "Gated voltage change: %s\n",
1201 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1202 seq_printf(m, "Starting frequency: P%d\n",
1203 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1204 seq_printf(m, "Max P-state: P%d\n",
1205 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1206 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1207 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1208 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1209 seq_printf(m, "Render standby enabled: %s\n",
1210 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1211 seq_puts(m, "Current RS state: ");
1212 switch (rstdbyctl & RSX_STATUS_MASK) {
1214 seq_puts(m, "on\n");
1216 case RSX_STATUS_RC1:
1217 seq_puts(m, "RC1\n");
1219 case RSX_STATUS_RC1E:
1220 seq_puts(m, "RC1E\n");
1222 case RSX_STATUS_RS1:
1223 seq_puts(m, "RS1\n");
1225 case RSX_STATUS_RS2:
1226 seq_puts(m, "RS2 (RC6)\n");
1228 case RSX_STATUS_RS3:
1229 seq_puts(m, "RS3 (RC6+)\n");
1232 seq_puts(m, "unknown\n");
1239 static int vlv_drpc_info(struct seq_file *m)
1242 struct drm_info_node *node = m->private;
1243 struct drm_device *dev = node->minor->dev;
1244 struct drm_i915_private *dev_priv = dev->dev_private;
1245 u32 rpmodectl1, rcctl1;
1246 unsigned fw_rendercount = 0, fw_mediacount = 0;
1248 intel_runtime_pm_get(dev_priv);
1250 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1251 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1253 intel_runtime_pm_put(dev_priv);
1255 seq_printf(m, "Video Turbo Mode: %s\n",
1256 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1257 seq_printf(m, "Turbo enabled: %s\n",
1258 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1259 seq_printf(m, "HW control enabled: %s\n",
1260 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1261 seq_printf(m, "SW control enabled: %s\n",
1262 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1263 GEN6_RP_MEDIA_SW_MODE));
1264 seq_printf(m, "RC6 Enabled: %s\n",
1265 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1266 GEN6_RC_CTL_EI_MODE(1))));
1267 seq_printf(m, "Render Power Well: %s\n",
1268 (I915_READ(VLV_GTLC_PW_STATUS) &
1269 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1270 seq_printf(m, "Media Power Well: %s\n",
1271 (I915_READ(VLV_GTLC_PW_STATUS) &
1272 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1274 seq_printf(m, "Render RC6 residency since boot: %u\n",
1275 I915_READ(VLV_GT_RENDER_RC6));
1276 seq_printf(m, "Media RC6 residency since boot: %u\n",
1277 I915_READ(VLV_GT_MEDIA_RC6));
1279 spin_lock_irq(&dev_priv->uncore.lock);
1280 fw_rendercount = dev_priv->uncore.fw_rendercount;
1281 fw_mediacount = dev_priv->uncore.fw_mediacount;
1282 spin_unlock_irq(&dev_priv->uncore.lock);
1284 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
1285 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
1292 static int gen6_drpc_info(struct seq_file *m)
1295 struct drm_info_node *node = m->private;
1296 struct drm_device *dev = node->minor->dev;
1297 struct drm_i915_private *dev_priv = dev->dev_private;
1298 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1299 unsigned forcewake_count;
1302 ret = mutex_lock_interruptible(&dev->struct_mutex);
1305 intel_runtime_pm_get(dev_priv);
1307 spin_lock_irq(&dev_priv->uncore.lock);
1308 forcewake_count = dev_priv->uncore.forcewake_count;
1309 spin_unlock_irq(&dev_priv->uncore.lock);
1311 if (forcewake_count) {
1312 seq_puts(m, "RC information inaccurate because somebody "
1313 "holds a forcewake reference\n");
1315 /* NB: we cannot use forcewake, else we read the wrong values */
1316 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1318 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1321 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1322 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1324 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1325 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1326 mutex_unlock(&dev->struct_mutex);
1327 mutex_lock(&dev_priv->rps.hw_lock);
1328 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1329 mutex_unlock(&dev_priv->rps.hw_lock);
1331 intel_runtime_pm_put(dev_priv);
1333 seq_printf(m, "Video Turbo Mode: %s\n",
1334 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1335 seq_printf(m, "HW control enabled: %s\n",
1336 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1337 seq_printf(m, "SW control enabled: %s\n",
1338 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1339 GEN6_RP_MEDIA_SW_MODE));
1340 seq_printf(m, "RC1e Enabled: %s\n",
1341 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1342 seq_printf(m, "RC6 Enabled: %s\n",
1343 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1344 seq_printf(m, "Deep RC6 Enabled: %s\n",
1345 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1346 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1347 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1348 seq_puts(m, "Current RC state: ");
1349 switch (gt_core_status & GEN6_RCn_MASK) {
1351 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1352 seq_puts(m, "Core Power Down\n");
1354 seq_puts(m, "on\n");
1357 seq_puts(m, "RC3\n");
1360 seq_puts(m, "RC6\n");
1363 seq_puts(m, "RC7\n");
1366 seq_puts(m, "Unknown\n");
1370 seq_printf(m, "Core Power Down: %s\n",
1371 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1373 /* Not exactly sure what this is */
1374 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1375 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1376 seq_printf(m, "RC6 residency since boot: %u\n",
1377 I915_READ(GEN6_GT_GFX_RC6));
1378 seq_printf(m, "RC6+ residency since boot: %u\n",
1379 I915_READ(GEN6_GT_GFX_RC6p));
1380 seq_printf(m, "RC6++ residency since boot: %u\n",
1381 I915_READ(GEN6_GT_GFX_RC6pp));
1383 seq_printf(m, "RC6 voltage: %dmV\n",
1384 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1385 seq_printf(m, "RC6+ voltage: %dmV\n",
1386 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1387 seq_printf(m, "RC6++ voltage: %dmV\n",
1388 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1392 static int i915_drpc_info(struct seq_file *m, void *unused)
1394 struct drm_info_node *node = m->private;
1395 struct drm_device *dev = node->minor->dev;
1397 if (IS_VALLEYVIEW(dev))
1398 return vlv_drpc_info(m);
1399 else if (INTEL_INFO(dev)->gen >= 6)
1400 return gen6_drpc_info(m);
1402 return ironlake_drpc_info(m);
1405 static int i915_fbc_status(struct seq_file *m, void *unused)
1407 struct drm_info_node *node = m->private;
1408 struct drm_device *dev = node->minor->dev;
1409 struct drm_i915_private *dev_priv = dev->dev_private;
1411 if (!HAS_FBC(dev)) {
1412 seq_puts(m, "FBC unsupported on this chipset\n");
1416 intel_runtime_pm_get(dev_priv);
1418 if (intel_fbc_enabled(dev)) {
1419 seq_puts(m, "FBC enabled\n");
1421 seq_puts(m, "FBC disabled: ");
1422 switch (dev_priv->fbc.no_fbc_reason) {
1424 seq_puts(m, "FBC activated, but currently disabled in hardware");
1426 case FBC_UNSUPPORTED:
1427 seq_puts(m, "unsupported by this chipset");
1430 seq_puts(m, "no outputs");
1432 case FBC_STOLEN_TOO_SMALL:
1433 seq_puts(m, "not enough stolen memory");
1435 case FBC_UNSUPPORTED_MODE:
1436 seq_puts(m, "mode not supported");
1438 case FBC_MODE_TOO_LARGE:
1439 seq_puts(m, "mode too large");
1442 seq_puts(m, "FBC unsupported on plane");
1445 seq_puts(m, "scanout buffer not tiled");
1447 case FBC_MULTIPLE_PIPES:
1448 seq_puts(m, "multiple pipes are enabled");
1450 case FBC_MODULE_PARAM:
1451 seq_puts(m, "disabled per module param (default off)");
1453 case FBC_CHIP_DEFAULT:
1454 seq_puts(m, "disabled per chip default");
1457 seq_puts(m, "unknown reason");
1462 intel_runtime_pm_put(dev_priv);
1467 static int i915_fbc_fc_get(void *data, u64 *val)
1469 struct drm_device *dev = data;
1470 struct drm_i915_private *dev_priv = dev->dev_private;
1472 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1475 drm_modeset_lock_all(dev);
1476 *val = dev_priv->fbc.false_color;
1477 drm_modeset_unlock_all(dev);
1482 static int i915_fbc_fc_set(void *data, u64 val)
1484 struct drm_device *dev = data;
1485 struct drm_i915_private *dev_priv = dev->dev_private;
1488 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1491 drm_modeset_lock_all(dev);
1493 reg = I915_READ(ILK_DPFC_CONTROL);
1494 dev_priv->fbc.false_color = val;
1496 I915_WRITE(ILK_DPFC_CONTROL, val ?
1497 (reg | FBC_CTL_FALSE_COLOR) :
1498 (reg & ~FBC_CTL_FALSE_COLOR));
1500 drm_modeset_unlock_all(dev);
1504 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1505 i915_fbc_fc_get, i915_fbc_fc_set,
1508 static int i915_ips_status(struct seq_file *m, void *unused)
1510 struct drm_info_node *node = m->private;
1511 struct drm_device *dev = node->minor->dev;
1512 struct drm_i915_private *dev_priv = dev->dev_private;
1514 if (!HAS_IPS(dev)) {
1515 seq_puts(m, "not supported\n");
1519 intel_runtime_pm_get(dev_priv);
1521 seq_printf(m, "Enabled by kernel parameter: %s\n",
1522 yesno(i915.enable_ips));
1524 if (INTEL_INFO(dev)->gen >= 8) {
1525 seq_puts(m, "Currently: unknown\n");
1527 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1528 seq_puts(m, "Currently: enabled\n");
1530 seq_puts(m, "Currently: disabled\n");
1533 intel_runtime_pm_put(dev_priv);
1538 static int i915_sr_status(struct seq_file *m, void *unused)
1540 struct drm_info_node *node = m->private;
1541 struct drm_device *dev = node->minor->dev;
1542 struct drm_i915_private *dev_priv = dev->dev_private;
1543 bool sr_enabled = false;
1545 intel_runtime_pm_get(dev_priv);
1547 if (HAS_PCH_SPLIT(dev))
1548 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1549 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1550 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1551 else if (IS_I915GM(dev))
1552 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1553 else if (IS_PINEVIEW(dev))
1554 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1556 intel_runtime_pm_put(dev_priv);
1558 seq_printf(m, "self-refresh: %s\n",
1559 sr_enabled ? "enabled" : "disabled");
1564 static int i915_emon_status(struct seq_file *m, void *unused)
1566 struct drm_info_node *node = m->private;
1567 struct drm_device *dev = node->minor->dev;
1568 struct drm_i915_private *dev_priv = dev->dev_private;
1569 unsigned long temp, chipset, gfx;
1575 ret = mutex_lock_interruptible(&dev->struct_mutex);
1579 temp = i915_mch_val(dev_priv);
1580 chipset = i915_chipset_val(dev_priv);
1581 gfx = i915_gfx_val(dev_priv);
1582 mutex_unlock(&dev->struct_mutex);
1584 seq_printf(m, "GMCH temp: %ld\n", temp);
1585 seq_printf(m, "Chipset power: %ld\n", chipset);
1586 seq_printf(m, "GFX power: %ld\n", gfx);
1587 seq_printf(m, "Total power: %ld\n", chipset + gfx);
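/*
 * debugfs: print the GPU frequency to effective CPU/ring frequency table
 * read back from the PCU (gen6/gen7 only).
 */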
1592 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1594 struct drm_info_node *node = m->private;
1595 struct drm_device *dev = node->minor->dev;
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1598 int gpu_freq, ia_freq;
1600 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1601 seq_puts(m, "unsupported on this chipset\n");
1605 intel_runtime_pm_get(dev_priv);
1607 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1609 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1613 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1615 for (gpu_freq = dev_priv->rps.min_freq_softlimit;
1616 gpu_freq <= dev_priv->rps.max_freq_softlimit;
1619 sandybridge_pcode_read(dev_priv,
1620 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1622 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1623 gpu_freq * GT_FREQUENCY_MULTIPLIER,
1624 ((ia_freq >> 0) & 0xff) * 100,
1625 ((ia_freq >> 8) & 0xff) * 100);
1628 mutex_unlock(&dev_priv->rps.hw_lock);
1631 intel_runtime_pm_put(dev_priv);
1635 static int i915_opregion(struct seq_file *m, void *unused)
1637 struct drm_info_node *node = m->private;
1638 struct drm_device *dev = node->minor->dev;
1639 struct drm_i915_private *dev_priv = dev->dev_private;
1640 struct intel_opregion *opregion = &dev_priv->opregion;
1641 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1647 ret = mutex_lock_interruptible(&dev->struct_mutex);
1651 if (opregion->header) {
1652 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1653 seq_write(m, data, OPREGION_SIZE);
1656 mutex_unlock(&dev->struct_mutex);
1663 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1665 struct drm_info_node *node = m->private;
1666 struct drm_device *dev = node->minor->dev;
1667 struct intel_fbdev *ifbdev = NULL;
1668 struct intel_framebuffer *fb;
1670 #ifdef CONFIG_DRM_I915_FBDEV
1671 struct drm_i915_private *dev_priv = dev->dev_private;
1673 ifbdev = dev_priv->fbdev;
1674 fb = to_intel_framebuffer(ifbdev->helper.fb);
1676 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1680 fb->base.bits_per_pixel,
1681 atomic_read(&fb->base.refcount.refcount));
1682 describe_obj(m, fb->obj);
1686 mutex_lock(&dev->mode_config.fb_lock);
1687 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1688 if (ifbdev && &fb->base == ifbdev->helper.fb)
1691 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1695 fb->base.bits_per_pixel,
1696 atomic_read(&fb->base.refcount.refcount));
1697 describe_obj(m, fb->obj);
1700 mutex_unlock(&dev->mode_config.fb_lock);
1705 static void describe_ctx_ringbuf(struct seq_file *m,
1706 struct intel_ringbuffer *ringbuf)
1708 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1709 ringbuf->space, ringbuf->head, ringbuf->tail,
1710 ringbuf->last_retired_head);
1713 static int i915_context_status(struct seq_file *m, void *unused)
1715 struct drm_info_node *node = m->private;
1716 struct drm_device *dev = node->minor->dev;
1717 struct drm_i915_private *dev_priv = dev->dev_private;
1718 struct intel_engine_cs *ring;
1719 struct intel_context *ctx;
1722 ret = mutex_lock_interruptible(&dev->struct_mutex);
1726 if (dev_priv->ips.pwrctx) {
1727 seq_puts(m, "power context ");
1728 describe_obj(m, dev_priv->ips.pwrctx);
1732 if (dev_priv->ips.renderctx) {
1733 seq_puts(m, "render context ");
1734 describe_obj(m, dev_priv->ips.renderctx);
1738 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1739 if (!i915.enable_execlists &&
1740 ctx->legacy_hw_ctx.rcs_state == NULL)
1743 seq_puts(m, "HW context ");
1744 describe_ctx(m, ctx);
1745 for_each_ring(ring, dev_priv, i) {
1746 if (ring->default_context == ctx)
1747 seq_printf(m, "(default context %s) ",
1751 if (i915.enable_execlists) {
1753 for_each_ring(ring, dev_priv, i) {
1754 struct drm_i915_gem_object *ctx_obj =
1755 ctx->engine[i].state;
1756 struct intel_ringbuffer *ringbuf =
1757 ctx->engine[i].ringbuf;
1759 seq_printf(m, "%s: ", ring->name);
1761 describe_obj(m, ctx_obj);
1763 describe_ctx_ringbuf(m, ringbuf);
1767 describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1773 mutex_unlock(&dev->struct_mutex);
1778 static int i915_dump_lrc(struct seq_file *m, void *unused)
1780 struct drm_info_node *node = (struct drm_info_node *) m->private;
1781 struct drm_device *dev = node->minor->dev;
1782 struct drm_i915_private *dev_priv = dev->dev_private;
1783 struct intel_engine_cs *ring;
1784 struct intel_context *ctx;
1787 if (!i915.enable_execlists) {
1788 seq_puts(m, "Logical Ring Contexts are disabled\n");
1792 ret = mutex_lock_interruptible(&dev->struct_mutex);
1796 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1797 for_each_ring(ring, dev_priv, i) {
1798 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1800 if (ring->default_context == ctx)
1804 struct page *page = i915_gem_object_get_page(ctx_obj, 1);
1805 uint32_t *reg_state = kmap_atomic(page);
1808 seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1809 intel_execlists_ctx_id(ctx_obj));
1811 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1812 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1813 i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
1814 reg_state[j], reg_state[j + 1],
1815 reg_state[j + 2], reg_state[j + 3]);
1817 kunmap_atomic(reg_state);
1824 mutex_unlock(&dev->struct_mutex);
1829 static int i915_execlists(struct seq_file *m, void *data)
1831 struct drm_info_node *node = (struct drm_info_node *)m->private;
1832 struct drm_device *dev = node->minor->dev;
1833 struct drm_i915_private *dev_priv = dev->dev_private;
1834 struct intel_engine_cs *ring;
1840 struct list_head *cursor;
1844 if (!i915.enable_execlists) {
1845 seq_puts(m, "Logical Ring Contexts are disabled\n");
1849 ret = mutex_lock_interruptible(&dev->struct_mutex);
1853 for_each_ring(ring, dev_priv, ring_id) {
1854 struct intel_ctx_submit_request *head_req = NULL;
1856 unsigned long flags;
1858 seq_printf(m, "%s\n", ring->name);
1860 status = I915_READ(RING_EXECLIST_STATUS(ring));
1861 ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
1862 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
1865 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
1866 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
1868 read_pointer = ring->next_context_status_buffer;
1869 write_pointer = status_pointer & 0x07;
1870 if (read_pointer > write_pointer)
1872 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
1873 read_pointer, write_pointer);
1875 for (i = 0; i < 6; i++) {
1876 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
1877 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
1879 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
1883 spin_lock_irqsave(&ring->execlist_lock, flags);
1884 list_for_each(cursor, &ring->execlist_queue)
1886 head_req = list_first_entry_or_null(&ring->execlist_queue,
1887 struct intel_ctx_submit_request, execlist_link);
1888 spin_unlock_irqrestore(&ring->execlist_lock, flags);
1890 seq_printf(m, "\t%d requests in queue\n", count);
1892 struct drm_i915_gem_object *ctx_obj;
1894 ctx_obj = head_req->ctx->engine[ring_id].state;
1895 seq_printf(m, "\tHead request id: %u\n",
1896 intel_execlists_ctx_id(ctx_obj));
1897 seq_printf(m, "\tHead request tail: %u\n",
1904 mutex_unlock(&dev->struct_mutex);
1909 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1911 struct drm_info_node *node = m->private;
1912 struct drm_device *dev = node->minor->dev;
1913 struct drm_i915_private *dev_priv = dev->dev_private;
1914 unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
1916 spin_lock_irq(&dev_priv->uncore.lock);
1917 if (IS_VALLEYVIEW(dev)) {
1918 fw_rendercount = dev_priv->uncore.fw_rendercount;
1919 fw_mediacount = dev_priv->uncore.fw_mediacount;
1921 forcewake_count = dev_priv->uncore.forcewake_count;
1922 spin_unlock_irq(&dev_priv->uncore.lock);
1924 if (IS_VALLEYVIEW(dev)) {
1925 seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
1926 seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
1928 seq_printf(m, "forcewake count = %u\n", forcewake_count);
1933 static const char *swizzle_string(unsigned swizzle)
1936 case I915_BIT_6_SWIZZLE_NONE:
1938 case I915_BIT_6_SWIZZLE_9:
1940 case I915_BIT_6_SWIZZLE_9_10:
1941 return "bit9/bit10";
1942 case I915_BIT_6_SWIZZLE_9_11:
1943 return "bit9/bit11";
1944 case I915_BIT_6_SWIZZLE_9_10_11:
1945 return "bit9/bit10/bit11";
1946 case I915_BIT_6_SWIZZLE_9_17:
1947 return "bit9/bit17";
1948 case I915_BIT_6_SWIZZLE_9_10_17:
1949 return "bit9/bit10/bit17";
1950 case I915_BIT_6_SWIZZLE_UNKNOWN:
1957 static int i915_swizzle_info(struct seq_file *m, void *data)
1959 struct drm_info_node *node = m->private;
1960 struct drm_device *dev = node->minor->dev;
1961 struct drm_i915_private *dev_priv = dev->dev_private;
1964 ret = mutex_lock_interruptible(&dev->struct_mutex);
1967 intel_runtime_pm_get(dev_priv);
1969 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1970 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1971 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1972 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1974 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1975 seq_printf(m, "DDC = 0x%08x\n",
1977 seq_printf(m, "C0DRB3 = 0x%04x\n",
1978 I915_READ16(C0DRB3));
1979 seq_printf(m, "C1DRB3 = 0x%04x\n",
1980 I915_READ16(C1DRB3));
1981 } else if (INTEL_INFO(dev)->gen >= 6) {
1982 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1983 I915_READ(MAD_DIMM_C0));
1984 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1985 I915_READ(MAD_DIMM_C1));
1986 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1987 I915_READ(MAD_DIMM_C2));
1988 seq_printf(m, "TILECTL = 0x%08x\n",
1989 I915_READ(TILECTL));
1991 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1992 I915_READ(GAMTARBMODE));
1994 seq_printf(m, "ARB_MODE = 0x%08x\n",
1995 I915_READ(ARB_MODE));
1996 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1997 I915_READ(DISP_ARB_CTL));
1999 intel_runtime_pm_put(dev_priv);
2000 mutex_unlock(&dev->struct_mutex);
2005 static int per_file_ctx(int id, void *ptr, void *data)
2007 struct intel_context *ctx = ptr;
2008 struct seq_file *m = data;
2009 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2012 seq_printf(m, " no ppgtt for context %d\n",
2017 if (i915_gem_context_is_default(ctx))
2018 seq_puts(m, " default context:\n");
2020 seq_printf(m, " context %d:\n", ctx->user_handle);
2021 ppgtt->debug_dump(ppgtt, m);
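/*
 * Dump the aliasing PPGTT state on gen8+: page directory/table counts and
 * the PDP registers programmed on each ring.
 */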
2026 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2028 struct drm_i915_private *dev_priv = dev->dev_private;
2029 struct intel_engine_cs *ring;
2030 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2036 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
2037 seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
2038 for_each_ring(ring, dev_priv, unused) {
2039 seq_printf(m, "%s\n", ring->name);
2040 for (i = 0; i < 4; i++) {
2041 u32 offset = 0x270 + i * 8;
2042 u64 pdp = I915_READ(ring->mmio_base + offset + 4);
2044 pdp |= I915_READ(ring->mmio_base + offset);
2045 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2050 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2052 struct drm_i915_private *dev_priv = dev->dev_private;
2053 struct intel_engine_cs *ring;
2054 struct drm_file *file;
2057 if (INTEL_INFO(dev)->gen == 6)
2058 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2060 for_each_ring(ring, dev_priv, i) {
2061 seq_printf(m, "%s\n", ring->name);
2062 if (INTEL_INFO(dev)->gen == 7)
2063 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2064 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2065 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2066 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2068 if (dev_priv->mm.aliasing_ppgtt) {
2069 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2071 seq_puts(m, "aliasing PPGTT:\n");
2072 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
2074 ppgtt->debug_dump(ppgtt, m);
2077 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2078 struct drm_i915_file_private *file_priv = file->driver_priv;
2080 seq_printf(m, "proc: %s\n",
2081 get_pid_task(file->pid, PIDTYPE_PID)->comm);
2082 idr_for_each(&file_priv->context_idr, per_file_ctx, m);
2084 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2087 static int i915_ppgtt_info(struct seq_file *m, void *data)
2089 struct drm_info_node *node = m->private;
2090 struct drm_device *dev = node->minor->dev;
2091 struct drm_i915_private *dev_priv = dev->dev_private;
2093 int ret = mutex_lock_interruptible(&dev->struct_mutex);
2096 intel_runtime_pm_get(dev_priv);
2098 if (INTEL_INFO(dev)->gen >= 8)
2099 gen8_ppgtt_info(m, dev);
2100 else if (INTEL_INFO(dev)->gen >= 6)
2101 gen6_ppgtt_info(m, dev);
2103 intel_runtime_pm_put(dev_priv);
2104 mutex_unlock(&dev->struct_mutex);
2109 static int i915_llc(struct seq_file *m, void *data)
2111 struct drm_info_node *node = m->private;
2112 struct drm_device *dev = node->minor->dev;
2113 struct drm_i915_private *dev_priv = dev->dev_private;
2115 /* Size calculation for LLC is a bit of a pain. Ignore for now. */
2116 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2117 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
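/*
 * debugfs: report PSR (panel self refresh) sink/source support, enable and
 * active state, busy frontbuffer bits and the hardware performance counter.
 */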
2122 static int i915_edp_psr_status(struct seq_file *m, void *data)
2124 struct drm_info_node *node = m->private;
2125 struct drm_device *dev = node->minor->dev;
2126 struct drm_i915_private *dev_priv = dev->dev_private;
2128 bool enabled = false;
2130 intel_runtime_pm_get(dev_priv);
2132 mutex_lock(&dev_priv->psr.lock);
2133 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2134 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2135 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2136 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2137 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2138 dev_priv->psr.busy_frontbuffer_bits);
2139 seq_printf(m, "Re-enable work scheduled: %s\n",
2140 yesno(work_busy(&dev_priv->psr.work.work)));
2142 enabled = HAS_PSR(dev) &&
2143 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
2144 seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2147 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
2148 EDP_PSR_PERF_CNT_MASK;
2149 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2150 mutex_unlock(&dev_priv->psr.lock);
2152 intel_runtime_pm_put(dev_priv);
2156 static int i915_sink_crc(struct seq_file *m, void *data)
2158 struct drm_info_node *node = m->private;
2159 struct drm_device *dev = node->minor->dev;
2160 struct intel_encoder *encoder;
2161 struct intel_connector *connector;
2162 struct intel_dp *intel_dp = NULL;
2166 drm_modeset_lock_all(dev);
2167 list_for_each_entry(connector, &dev->mode_config.connector_list,
2170 if (connector->base.dpms != DRM_MODE_DPMS_ON)
2173 if (!connector->base.encoder)
2176 encoder = to_intel_encoder(connector->base.encoder);
2177 if (encoder->type != INTEL_OUTPUT_EDP)
2180 intel_dp = enc_to_intel_dp(&encoder->base);
2182 ret = intel_dp_sink_crc(intel_dp, crc);
2186 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2187 crc[0], crc[1], crc[2],
2188 crc[3], crc[4], crc[5]);
2193 drm_modeset_unlock_all(dev);
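/*
 * i915_energy_uJ: report GPU energy use in microjoules. The energy-unit
 * field from MSR_RAPL_POWER_UNIT is used to scale the raw
 * MCH_SECP_NRG_STTS counter (gen6+ only).
 */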
2197 static int i915_energy_uJ(struct seq_file *m, void *data)
2199 struct drm_info_node *node = m->private;
2200 struct drm_device *dev = node->minor->dev;
2201 struct drm_i915_private *dev_priv = dev->dev_private;
2205 if (INTEL_INFO(dev)->gen < 6)
2208 intel_runtime_pm_get(dev_priv);
2210 rdmsrl(MSR_RAPL_POWER_UNIT, power);
2211 power = (power & 0x1f00) >> 8;
2212 units = 1000000 / (1 << power); /* convert to uJ */
2213 power = I915_READ(MCH_SECP_NRG_STTS);
2216 intel_runtime_pm_put(dev_priv);
2218 seq_printf(m, "%llu", (long long unsigned)power);
2223 static int i915_pc8_status(struct seq_file *m, void *unused)
2225 struct drm_info_node *node = m->private;
2226 struct drm_device *dev = node->minor->dev;
2227 struct drm_i915_private *dev_priv = dev->dev_private;
2229 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2230 seq_puts(m, "not supported\n");
2234 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2235 seq_printf(m, "IRQs disabled: %s\n",
2236 yesno(!intel_irqs_enabled(dev_priv)));
2241 static const char *power_domain_str(enum intel_display_power_domain domain)
2244 case POWER_DOMAIN_PIPE_A:
2246 case POWER_DOMAIN_PIPE_B:
2248 case POWER_DOMAIN_PIPE_C:
2250 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2251 return "PIPE_A_PANEL_FITTER";
2252 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2253 return "PIPE_B_PANEL_FITTER";
2254 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2255 return "PIPE_C_PANEL_FITTER";
2256 case POWER_DOMAIN_TRANSCODER_A:
2257 return "TRANSCODER_A";
2258 case POWER_DOMAIN_TRANSCODER_B:
2259 return "TRANSCODER_B";
2260 case POWER_DOMAIN_TRANSCODER_C:
2261 return "TRANSCODER_C";
2262 case POWER_DOMAIN_TRANSCODER_EDP:
2263 return "TRANSCODER_EDP";
2264 case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2265 return "PORT_DDI_A_2_LANES";
2266 case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2267 return "PORT_DDI_A_4_LANES";
2268 case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2269 return "PORT_DDI_B_2_LANES";
2270 case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2271 return "PORT_DDI_B_4_LANES";
2272 case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2273 return "PORT_DDI_C_2_LANES";
2274 case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2275 return "PORT_DDI_C_4_LANES";
2276 case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2277 return "PORT_DDI_D_2_LANES";
2278 case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2279 return "PORT_DDI_D_4_LANES";
2280 case POWER_DOMAIN_PORT_DSI:
2282 case POWER_DOMAIN_PORT_CRT:
2284 case POWER_DOMAIN_PORT_OTHER:
2285 return "PORT_OTHER";
2286 case POWER_DOMAIN_VGA:
2288 case POWER_DOMAIN_AUDIO:
2290 case POWER_DOMAIN_PLLS:
2292 case POWER_DOMAIN_INIT:
2300 static int i915_power_domain_info(struct seq_file *m, void *unused)
2302 struct drm_info_node *node = m->private;
2303 struct drm_device *dev = node->minor->dev;
2304 struct drm_i915_private *dev_priv = dev->dev_private;
2305 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2308 mutex_lock(&power_domains->lock);
2310 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2311 for (i = 0; i < power_domains->power_well_count; i++) {
2312 struct i915_power_well *power_well;
2313 enum intel_display_power_domain power_domain;
2315 power_well = &power_domains->power_wells[i];
2316 seq_printf(m, "%-25s %d\n", power_well->name,
2319 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2321 if (!(BIT(power_domain) & power_well->domains))
2324 seq_printf(m, " %-23s %d\n",
2325 power_domain_str(power_domain),
2326 power_domains->domain_use_count[power_domain]);
2330 mutex_unlock(&power_domains->lock);
2335 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2336 struct drm_display_mode *mode)
2340 for (i = 0; i < tabs; i++)
2343 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2344 mode->base.id, mode->name,
2345 mode->vrefresh, mode->clock,
2346 mode->hdisplay, mode->hsync_start,
2347 mode->hsync_end, mode->htotal,
2348 mode->vdisplay, mode->vsync_start,
2349 mode->vsync_end, mode->vtotal,
2350 mode->type, mode->flags);
2353 static void intel_encoder_info(struct seq_file *m,
2354 struct intel_crtc *intel_crtc,
2355 struct intel_encoder *intel_encoder)
2357 struct drm_info_node *node = m->private;
2358 struct drm_device *dev = node->minor->dev;
2359 struct drm_crtc *crtc = &intel_crtc->base;
2360 struct intel_connector *intel_connector;
2361 struct drm_encoder *encoder;
2363 encoder = &intel_encoder->base;
2364 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2365 encoder->base.id, encoder->name);
2366 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2367 struct drm_connector *connector = &intel_connector->base;
2368 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2371 drm_get_connector_status_name(connector->status));
2372 if (connector->status == connector_status_connected) {
2373 struct drm_display_mode *mode = &crtc->mode;
2374 seq_printf(m, ", mode:\n");
2375 intel_seq_print_mode(m, 2, mode);
2382 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2384 struct drm_info_node *node = m->private;
2385 struct drm_device *dev = node->minor->dev;
2386 struct drm_crtc *crtc = &intel_crtc->base;
2387 struct intel_encoder *intel_encoder;
2389 if (crtc->primary->fb)
2390 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2391 crtc->primary->fb->base.id, crtc->x, crtc->y,
2392 crtc->primary->fb->width, crtc->primary->fb->height);
2394 seq_puts(m, "\tprimary plane disabled\n");
2395 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2396 intel_encoder_info(m, intel_crtc, intel_encoder);
2399 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2401 struct drm_display_mode *mode = panel->fixed_mode;
2403 seq_printf(m, "\tfixed mode:\n");
2404 intel_seq_print_mode(m, 2, mode);
2407 static void intel_dp_info(struct seq_file *m,
2408 struct intel_connector *intel_connector)
2410 struct intel_encoder *intel_encoder = intel_connector->encoder;
2411 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2413 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2414 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2416 if (intel_encoder->type == INTEL_OUTPUT_EDP)
2417 intel_panel_info(m, &intel_connector->panel);
2420 static void intel_hdmi_info(struct seq_file *m,
2421 struct intel_connector *intel_connector)
2423 struct intel_encoder *intel_encoder = intel_connector->encoder;
2424 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2426 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2430 static void intel_lvds_info(struct seq_file *m,
2431 struct intel_connector *intel_connector)
2433 intel_panel_info(m, &intel_connector->panel);
2436 static void intel_connector_info(struct seq_file *m,
2437 struct drm_connector *connector)
2439 struct intel_connector *intel_connector = to_intel_connector(connector);
2440 struct intel_encoder *intel_encoder = intel_connector->encoder;
2441 struct drm_display_mode *mode;
2443 seq_printf(m, "connector %d: type %s, status: %s\n",
2444 connector->base.id, connector->name,
2445 drm_get_connector_status_name(connector->status));
2446 if (connector->status == connector_status_connected) {
2447 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2448 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2449 connector->display_info.width_mm,
2450 connector->display_info.height_mm);
2451 seq_printf(m, "\tsubpixel order: %s\n",
2452 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2453 seq_printf(m, "\tCEA rev: %d\n",
2454 connector->display_info.cea_rev);
2456 if (intel_encoder) {
2457 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2458 intel_encoder->type == INTEL_OUTPUT_EDP)
2459 intel_dp_info(m, intel_connector);
2460 else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2461 intel_hdmi_info(m, intel_connector);
2462 else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2463 intel_lvds_info(m, intel_connector);
2466 seq_printf(m, "\tmodes:\n");
2467 list_for_each_entry(mode, &connector->modes, head)
2468 intel_seq_print_mode(m, 2, mode);
2471 static bool cursor_active(struct drm_device *dev, int pipe)
2473 struct drm_i915_private *dev_priv = dev->dev_private;
2476 if (IS_845G(dev) || IS_I865G(dev))
2477 state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2479 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2484 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2486 struct drm_i915_private *dev_priv = dev->dev_private;
2489 pos = I915_READ(CURPOS(pipe));
2491 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2492 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2495 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2496 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2499 return cursor_active(dev, pipe);
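/*
 * i915_display_info: walk every CRTC and connector under the modeset
 * locks and print pipe state, cursor position, FIFO underrun reporting
 * and per-connector mode details.
 */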
2502 static int i915_display_info(struct seq_file *m, void *unused)
2504 struct drm_info_node *node = m->private;
2505 struct drm_device *dev = node->minor->dev;
2506 struct drm_i915_private *dev_priv = dev->dev_private;
2507 struct intel_crtc *crtc;
2508 struct drm_connector *connector;
2510 intel_runtime_pm_get(dev_priv);
2511 drm_modeset_lock_all(dev);
2512 seq_printf(m, "CRTC info\n");
2513 seq_printf(m, "---------\n");
2514 for_each_intel_crtc(dev, crtc) {
2518 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
2519 crtc->base.base.id, pipe_name(crtc->pipe),
2520 yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
2522 intel_crtc_info(m, crtc);
2524 active = cursor_position(dev, crtc->pipe, &x, &y);
2525 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
2526 yesno(crtc->cursor_base),
2527 x, y, crtc->cursor_width, crtc->cursor_height,
2528 crtc->cursor_addr, yesno(active));
2531 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2532 yesno(!crtc->cpu_fifo_underrun_disabled),
2533 yesno(!crtc->pch_fifo_underrun_disabled));
2536 seq_printf(m, "\n");
2537 seq_printf(m, "Connector info\n");
2538 seq_printf(m, "--------------\n");
2539 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2540 intel_connector_info(m, connector);
2542 drm_modeset_unlock_all(dev);
2543 intel_runtime_pm_put(dev_priv);
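/*
 * i915_semaphore_status: dump inter-ring semaphore state. On BDW the
 * signal/wait mailboxes live in a GEM object that is kmapped here; on
 * older gens the per-ring mbox registers are read directly. The sync
 * seqno matrix is printed for every ring pair.
 */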
2548 static int i915_semaphore_status(struct seq_file *m, void *unused)
2550 struct drm_info_node *node = (struct drm_info_node *) m->private;
2551 struct drm_device *dev = node->minor->dev;
2552 struct drm_i915_private *dev_priv = dev->dev_private;
2553 struct intel_engine_cs *ring;
2554 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
2557 if (!i915_semaphore_is_enabled(dev)) {
2558 seq_puts(m, "Semaphores are disabled\n");
2562 ret = mutex_lock_interruptible(&dev->struct_mutex);
2565 intel_runtime_pm_get(dev_priv);
2567 if (IS_BROADWELL(dev)) {
2571 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
2573 seqno = (uint64_t *)kmap_atomic(page);
2574 for_each_ring(ring, dev_priv, i) {
2577 seq_printf(m, "%s\n", ring->name);
2579 seq_puts(m, " Last signal:");
2580 for (j = 0; j < num_rings; j++) {
2581 offset = i * I915_NUM_RINGS + j;
2582 seq_printf(m, "0x%08llx (0x%02llx) ",
2583 seqno[offset], offset * 8);
2587 seq_puts(m, " Last wait: ");
2588 for (j = 0; j < num_rings; j++) {
2589 offset = i + (j * I915_NUM_RINGS);
2590 seq_printf(m, "0x%08llx (0x%02llx) ",
2591 seqno[offset], offset * 8);
2596 kunmap_atomic(seqno);
2598 seq_puts(m, " Last signal:");
2599 for_each_ring(ring, dev_priv, i)
2600 for (j = 0; j < num_rings; j++)
2601 seq_printf(m, "0x%08x\n",
2602 I915_READ(ring->semaphore.mbox.signal[j]));
2606 seq_puts(m, "\nSync seqno:\n");
2607 for_each_ring(ring, dev_priv, i) {
2608 for (j = 0; j < num_rings; j++) {
2609 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
2615 intel_runtime_pm_put(dev_priv);
2616 mutex_unlock(&dev->struct_mutex);
2620 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2622 struct drm_info_node *node = (struct drm_info_node *) m->private;
2623 struct drm_device *dev = node->minor->dev;
2624 struct drm_i915_private *dev_priv = dev->dev_private;
2627 drm_modeset_lock_all(dev);
2628 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2629 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2631 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2632 seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
2633 pll->active, yesno(pll->on));
2634 seq_printf(m, " tracked hardware state:\n");
2635 seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
2636 seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
2637 seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
2638 seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
2639 seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
2641 drm_modeset_unlock_all(dev);
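/*
 * i915_wa_registers: list the workaround registers the driver applied,
 * together with their current value and mask.
 */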
2646 static int i915_wa_registers(struct seq_file *m, void *unused)
2650 struct drm_info_node *node = (struct drm_info_node *) m->private;
2651 struct drm_device *dev = node->minor->dev;
2652 struct drm_i915_private *dev_priv = dev->dev_private;
2654 ret = mutex_lock_interruptible(&dev->struct_mutex);
2658 intel_runtime_pm_get(dev_priv);
2660 seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
2661 for (i = 0; i < dev_priv->num_wa_regs; ++i) {
2664 addr = dev_priv->intel_wa_regs[i].addr;
2665 mask = dev_priv->intel_wa_regs[i].mask;
2666 dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
2667 if (dev_priv->intel_wa_regs[i].addr)
2668 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2669 dev_priv->intel_wa_regs[i].addr,
2670 dev_priv->intel_wa_regs[i].value,
2671 dev_priv->intel_wa_regs[i].mask);
2674 intel_runtime_pm_put(dev_priv);
2675 mutex_unlock(&dev->struct_mutex);
2680 struct pipe_crc_info {
2682 struct drm_device *dev;
2686 static int i915_dp_mst_info(struct seq_file *m, void *unused)
2688 struct drm_info_node *node = (struct drm_info_node *) m->private;
2689 struct drm_device *dev = node->minor->dev;
2690 struct drm_encoder *encoder;
2691 struct intel_encoder *intel_encoder;
2692 struct intel_digital_port *intel_dig_port;
2693 drm_modeset_lock_all(dev);
2694 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2695 intel_encoder = to_intel_encoder(encoder);
2696 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
2698 intel_dig_port = enc_to_dig_port(encoder);
2699 if (!intel_dig_port->dp.can_mst)
2702 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
2704 drm_modeset_unlock_all(dev);
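/*
 * Per-pipe CRC files. Each file is single-open; reads drain complete
 * text lines ("<frame> <crc0> .. <crc4>") from the circular entry
 * buffer, blocking until data arrives unless O_NONBLOCK is set.
 */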
2708 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
2710 struct pipe_crc_info *info = inode->i_private;
2711 struct drm_i915_private *dev_priv = info->dev->dev_private;
2712 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2714 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
2717 spin_lock_irq(&pipe_crc->lock);
2719 if (pipe_crc->opened) {
2720 spin_unlock_irq(&pipe_crc->lock);
2721 return -EBUSY; /* already open */
2724 pipe_crc->opened = true;
2725 filep->private_data = inode->i_private;
2727 spin_unlock_irq(&pipe_crc->lock);
2732 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
2734 struct pipe_crc_info *info = inode->i_private;
2735 struct drm_i915_private *dev_priv = info->dev->dev_private;
2736 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2738 spin_lock_irq(&pipe_crc->lock);
2739 pipe_crc->opened = false;
2740 spin_unlock_irq(&pipe_crc->lock);
2745 /* (6 fields, 8 chars each, space separated (5) + '\n') */
2746 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
2747 /* account for the terminating '\0' */
2748 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
2750 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
2752 assert_spin_locked(&pipe_crc->lock);
2753 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
2754 INTEL_PIPE_CRC_ENTRIES_NR);
2758 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
2761 struct pipe_crc_info *info = filep->private_data;
2762 struct drm_device *dev = info->dev;
2763 struct drm_i915_private *dev_priv = dev->dev_private;
2764 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2765 char buf[PIPE_CRC_BUFFER_LEN];
2766 int head, tail, n_entries, n;
2770 * Don't allow user space to provide buffers not big enough to hold
2771 * a line of data.
2773 if (count < PIPE_CRC_LINE_LEN)
2776 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
2779 /* nothing to read */
2780 spin_lock_irq(&pipe_crc->lock);
2781 while (pipe_crc_data_count(pipe_crc) == 0) {
2784 if (filep->f_flags & O_NONBLOCK) {
2785 spin_unlock_irq(&pipe_crc->lock);
2789 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
2790 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
2792 spin_unlock_irq(&pipe_crc->lock);
2797 /* We now have one or more entries to read */
2798 head = pipe_crc->head;
2799 tail = pipe_crc->tail;
2800 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
2801 count / PIPE_CRC_LINE_LEN);
2802 spin_unlock_irq(&pipe_crc->lock);
2807 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
2810 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
2811 "%8u %8x %8x %8x %8x %8x\n",
2812 entry->frame, entry->crc[0],
2813 entry->crc[1], entry->crc[2],
2814 entry->crc[3], entry->crc[4]);
2816 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
2817 buf, PIPE_CRC_LINE_LEN);
2818 if (ret == PIPE_CRC_LINE_LEN)
2821 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
2822 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
2824 } while (--n_entries);
2826 spin_lock_irq(&pipe_crc->lock);
2827 pipe_crc->tail = tail;
2828 spin_unlock_irq(&pipe_crc->lock);
2833 static const struct file_operations i915_pipe_crc_fops = {
2834 .owner = THIS_MODULE,
2835 .open = i915_pipe_crc_open,
2836 .read = i915_pipe_crc_read,
2837 .release = i915_pipe_crc_release,
2840 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
2842 .name = "i915_pipe_A_crc",
2846 .name = "i915_pipe_B_crc",
2850 .name = "i915_pipe_C_crc",
2855 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
2858 struct drm_device *dev = minor->dev;
2860 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
2863 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2864 &i915_pipe_crc_fops);
2868 return drm_add_fake_info_node(minor, ent, info);
2871 static const char * const pipe_crc_sources[] = {
2884 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
2886 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
2887 return pipe_crc_sources[source];
2890 static int display_crc_ctl_show(struct seq_file *m, void *data)
2892 struct drm_device *dev = m->private;
2893 struct drm_i915_private *dev_priv = dev->dev_private;
2896 for (i = 0; i < I915_MAX_PIPES; i++)
2897 seq_printf(m, "%c %s\n", pipe_name(i),
2898 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
2903 static int display_crc_ctl_open(struct inode *inode, struct file *file)
2905 struct drm_device *dev = inode->i_private;
2907 return single_open(file, display_crc_ctl_show, dev);
2910 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2913 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2914 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2917 case INTEL_PIPE_CRC_SOURCE_PIPE:
2918 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
2920 case INTEL_PIPE_CRC_SOURCE_NONE:
2930 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2931 enum intel_pipe_crc_source *source)
2933 struct intel_encoder *encoder;
2934 struct intel_crtc *crtc;
2935 struct intel_digital_port *dig_port;
2938 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2940 drm_modeset_lock_all(dev);
2941 for_each_intel_encoder(dev, encoder) {
2942 if (!encoder->base.crtc)
2945 crtc = to_intel_crtc(encoder->base.crtc);
2947 if (crtc->pipe != pipe)
2950 switch (encoder->type) {
2951 case INTEL_OUTPUT_TVOUT:
2952 *source = INTEL_PIPE_CRC_SOURCE_TV;
2954 case INTEL_OUTPUT_DISPLAYPORT:
2955 case INTEL_OUTPUT_EDP:
2956 dig_port = enc_to_dig_port(&encoder->base);
2957 switch (dig_port->port) {
2959 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2962 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2965 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2968 WARN(1, "nonexisting DP port %c\n",
2969 port_name(dig_port->port));
2975 drm_modeset_unlock_all(dev);
2980 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2982 enum intel_pipe_crc_source *source,
2985 struct drm_i915_private *dev_priv = dev->dev_private;
2986 bool need_stable_symbols = false;
2988 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2989 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2995 case INTEL_PIPE_CRC_SOURCE_PIPE:
2996 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2998 case INTEL_PIPE_CRC_SOURCE_DP_B:
2999 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3000 need_stable_symbols = true;
3002 case INTEL_PIPE_CRC_SOURCE_DP_C:
3003 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3004 need_stable_symbols = true;
3006 case INTEL_PIPE_CRC_SOURCE_NONE:
3014 * When the pipe CRC tap point is after the transcoders we need
3015 * to tweak symbol-level features to produce a deterministic series of
3016 * symbols for a given frame. We need to reset those features only once
3017 * a frame (instead of every nth symbol):
3018 * - DC-balance: used to ensure a better clock recovery from the data
3020 * - DisplayPort scrambling: used for EMI reduction
3022 if (need_stable_symbols) {
3023 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3025 tmp |= DC_BALANCE_RESET_VLV;
3027 tmp |= PIPE_A_SCRAMBLE_RESET;
3029 tmp |= PIPE_B_SCRAMBLE_RESET;
3031 I915_WRITE(PORT_DFT2_G4X, tmp);
3037 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3039 enum intel_pipe_crc_source *source,
3042 struct drm_i915_private *dev_priv = dev->dev_private;
3043 bool need_stable_symbols = false;
3045 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3046 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3052 case INTEL_PIPE_CRC_SOURCE_PIPE:
3053 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3055 case INTEL_PIPE_CRC_SOURCE_TV:
3056 if (!SUPPORTS_TV(dev))
3058 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3060 case INTEL_PIPE_CRC_SOURCE_DP_B:
3063 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3064 need_stable_symbols = true;
3066 case INTEL_PIPE_CRC_SOURCE_DP_C:
3069 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3070 need_stable_symbols = true;
3072 case INTEL_PIPE_CRC_SOURCE_DP_D:
3075 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3076 need_stable_symbols = true;
3078 case INTEL_PIPE_CRC_SOURCE_NONE:
3086 * When the pipe CRC tap point is after the transcoders we need
3087 * to tweak symbol-level features to produce a deterministic series of
3088 * symbols for a given frame. We need to reset those features only once
3089 * a frame (instead of every nth symbol):
3090 * - DC-balance: used to ensure a better clock recovery from the data
3092 * - DisplayPort scrambling: used for EMI reduction
3094 if (need_stable_symbols) {
3095 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3097 WARN_ON(!IS_G4X(dev));
3099 I915_WRITE(PORT_DFT_I9XX,
3100 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3103 tmp |= PIPE_A_SCRAMBLE_RESET;
3105 tmp |= PIPE_B_SCRAMBLE_RESET;
3107 I915_WRITE(PORT_DFT2_G4X, tmp);
3113 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3116 struct drm_i915_private *dev_priv = dev->dev_private;
3117 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3120 tmp &= ~PIPE_A_SCRAMBLE_RESET;
3122 tmp &= ~PIPE_B_SCRAMBLE_RESET;
3123 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3124 tmp &= ~DC_BALANCE_RESET_VLV;
3125 I915_WRITE(PORT_DFT2_G4X, tmp);
3129 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3132 struct drm_i915_private *dev_priv = dev->dev_private;
3133 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3136 tmp &= ~PIPE_A_SCRAMBLE_RESET;
3138 tmp &= ~PIPE_B_SCRAMBLE_RESET;
3139 I915_WRITE(PORT_DFT2_G4X, tmp);
3141 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3142 I915_WRITE(PORT_DFT_I9XX,
3143 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3147 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3150 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3151 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
3154 case INTEL_PIPE_CRC_SOURCE_PLANE1:
3155 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3157 case INTEL_PIPE_CRC_SOURCE_PLANE2:
3158 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3160 case INTEL_PIPE_CRC_SOURCE_PIPE:
3161 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3163 case INTEL_PIPE_CRC_SOURCE_NONE:
3173 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3175 struct drm_i915_private *dev_priv = dev->dev_private;
3176 struct intel_crtc *crtc =
3177 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3179 drm_modeset_lock_all(dev);
3181 * If we use the eDP transcoder we need to make sure that we don't
3182 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
3183 * relevant on hsw with pipe A when using the always-on power well
3184 * routing.
3186 if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
3187 !crtc->config.pch_pfit.enabled) {
3188 crtc->config.pch_pfit.force_thru = true;
3190 intel_display_power_get(dev_priv,
3191 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3193 dev_priv->display.crtc_disable(&crtc->base);
3194 dev_priv->display.crtc_enable(&crtc->base);
3196 drm_modeset_unlock_all(dev);
3199 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3201 struct drm_i915_private *dev_priv = dev->dev_private;
3202 struct intel_crtc *crtc =
3203 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3205 drm_modeset_lock_all(dev);
3207 * If we use the eDP transcoder we need to make sure that we don't
3208 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
3209 * relevant on hsw with pipe A when using the always-on power well
3210 * routing.
3212 if (crtc->config.pch_pfit.force_thru) {
3213 crtc->config.pch_pfit.force_thru = false;
3215 dev_priv->display.crtc_disable(&crtc->base);
3216 dev_priv->display.crtc_enable(&crtc->base);
3218 intel_display_power_put(dev_priv,
3219 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3221 drm_modeset_unlock_all(dev);
3224 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3226 enum intel_pipe_crc_source *source,
3229 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3230 *source = INTEL_PIPE_CRC_SOURCE_PF;
3233 case INTEL_PIPE_CRC_SOURCE_PLANE1:
3234 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3236 case INTEL_PIPE_CRC_SOURCE_PLANE2:
3237 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3239 case INTEL_PIPE_CRC_SOURCE_PF:
3240 if (IS_HASWELL(dev) && pipe == PIPE_A)
3241 hsw_trans_edp_pipe_A_crc_wa(dev);
3243 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3245 case INTEL_PIPE_CRC_SOURCE_NONE:
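/*
 * pipe_crc_set_source: validate the requested source, program
 * PIPE_CRC_CTL and allocate the entry buffer on the none->source
 * transition; on the source->none transition wait a vblank, free the
 * buffer and undo any platform-specific workarounds (G4X/VLV symbol
 * scrambling reset, HSW pipe A pfit routing).
 */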
3255 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3256 enum intel_pipe_crc_source source)
3258 struct drm_i915_private *dev_priv = dev->dev_private;
3259 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3260 u32 val = 0; /* shut up gcc */
3263 if (pipe_crc->source == source)
3266 /* forbid changing the source without going back to 'none' */
3267 if (pipe_crc->source && source)
3271 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
3272 else if (INTEL_INFO(dev)->gen < 5)
3273 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3274 else if (IS_VALLEYVIEW(dev))
3275 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3276 else if (IS_GEN5(dev) || IS_GEN6(dev))
3277 ret = ilk_pipe_crc_ctl_reg(&source, &val);
3279 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3284 /* none -> real source transition */
3286 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
3287 pipe_name(pipe), pipe_crc_source_name(source));
3289 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
3290 INTEL_PIPE_CRC_ENTRIES_NR,
3292 if (!pipe_crc->entries)
3295 spin_lock_irq(&pipe_crc->lock);
3298 spin_unlock_irq(&pipe_crc->lock);
3301 pipe_crc->source = source;
3303 I915_WRITE(PIPE_CRC_CTL(pipe), val);
3304 POSTING_READ(PIPE_CRC_CTL(pipe));
3306 /* real source -> none transition */
3307 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
3308 struct intel_pipe_crc_entry *entries;
3309 struct intel_crtc *crtc =
3310 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
3312 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
3315 drm_modeset_lock(&crtc->base.mutex, NULL);
3317 intel_wait_for_vblank(dev, pipe);
3318 drm_modeset_unlock(&crtc->base.mutex);
3320 spin_lock_irq(&pipe_crc->lock);
3321 entries = pipe_crc->entries;
3322 pipe_crc->entries = NULL;
3323 spin_unlock_irq(&pipe_crc->lock);
3328 g4x_undo_pipe_scramble_reset(dev, pipe);
3329 else if (IS_VALLEYVIEW(dev))
3330 vlv_undo_pipe_scramble_reset(dev, pipe);
3331 else if (IS_HASWELL(dev) && pipe == PIPE_A)
3332 hsw_undo_trans_edp_pipe_A_crc_wa(dev);
3339 * Parse pipe CRC command strings:
3340 * command: wsp* object wsp+ name wsp+ source wsp*
3343 * source: (none | plane1 | plane2 | pf)
3344 * wsp: (#0x20 | #0x9 | #0xA)+
3347 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
3348 * "pipe A none" -> Stop CRC
3350 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
3357 /* skip leading white space */
3358 buf = skip_spaces(buf);
3360 break; /* end of buffer */
3362 /* find end of word */
3363 for (end = buf; *end && !isspace(*end); end++)
3366 if (n_words == max_words) {
3367 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
3369 return -EINVAL; /* ran out of words[] before bytes */
3374 words[n_words++] = buf;
3381 enum intel_pipe_crc_object {
3382 PIPE_CRC_OBJECT_PIPE,
3385 static const char * const pipe_crc_objects[] = {
3390 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3394 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3395 if (!strcmp(buf, pipe_crc_objects[i])) {
3403 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3405 const char name = buf[0];
3407 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3416 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3420 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3421 if (!strcmp(buf, pipe_crc_sources[i])) {
3429 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3433 char *words[N_WORDS];
3435 enum intel_pipe_crc_object object;
3436 enum intel_pipe_crc_source source;
3438 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3439 if (n_words != N_WORDS) {
3440 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3445 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3446 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3450 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3451 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3455 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3456 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3460 return pipe_crc_set_source(dev, pipe, source);
3463 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3464 size_t len, loff_t *offp)
3466 struct seq_file *m = file->private_data;
3467 struct drm_device *dev = m->private;
3474 if (len > PAGE_SIZE - 1) {
3475 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3480 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3484 if (copy_from_user(tmpbuf, ubuf, len)) {
3490 ret = display_crc_ctl_parse(dev, tmpbuf, len);
3501 static const struct file_operations i915_display_crc_ctl_fops = {
3502 .owner = THIS_MODULE,
3503 .open = display_crc_ctl_open,
3505 .llseek = seq_lseek,
3506 .release = single_release,
3507 .write = display_crc_ctl_write
3510 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
3512 struct drm_device *dev = m->private;
3513 int num_levels = ilk_wm_max_level(dev) + 1;
3516 drm_modeset_lock_all(dev);
3518 for (level = 0; level < num_levels; level++) {
3519 unsigned int latency = wm[level];
3521 /* WM1+ latency values in 0.5us units */
3522 if (level > 0)
3523 latency *= 5;
3525 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3526 level, latency,
3527 latency / 10, latency % 10);
3530 drm_modeset_unlock_all(dev);
3533 static int pri_wm_latency_show(struct seq_file *m, void *data)
3535 struct drm_device *dev = m->private;
3537 wm_latency_show(m, to_i915(dev)->wm.pri_latency);
3542 static int spr_wm_latency_show(struct seq_file *m, void *data)
3544 struct drm_device *dev = m->private;
3546 wm_latency_show(m, to_i915(dev)->wm.spr_latency);
3551 static int cur_wm_latency_show(struct seq_file *m, void *data)
3553 struct drm_device *dev = m->private;
3555 wm_latency_show(m, to_i915(dev)->wm.cur_latency);
3560 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3562 struct drm_device *dev = inode->i_private;
3564 if (HAS_GMCH_DISPLAY(dev))
3567 return single_open(file, pri_wm_latency_show, dev);
3570 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3572 struct drm_device *dev = inode->i_private;
3574 if (HAS_GMCH_DISPLAY(dev))
3577 return single_open(file, spr_wm_latency_show, dev);
3580 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3582 struct drm_device *dev = inode->i_private;
3584 if (HAS_GMCH_DISPLAY(dev))
3587 return single_open(file, cur_wm_latency_show, dev);
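/*
 * wm_latency_write: common write handler for the watermark latency
 * files. Parses up to five space-separated values and copies them over
 * the selected latency table under the modeset locks, e.g.
 * "echo 2 4 10 20 30 > i915_pri_wm_latency" (values illustrative).
 */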
3590 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3591 size_t len, loff_t *offp, uint16_t wm[5])
3593 struct seq_file *m = file->private_data;
3594 struct drm_device *dev = m->private;
3595 uint16_t new[5] = { 0 };
3596 int num_levels = ilk_wm_max_level(dev) + 1;
3601 if (len >= sizeof(tmp))
3604 if (copy_from_user(tmp, ubuf, len))
3609 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
3610 if (ret != num_levels)
3613 drm_modeset_lock_all(dev);
3615 for (level = 0; level < num_levels; level++)
3616 wm[level] = new[level];
3618 drm_modeset_unlock_all(dev);
3624 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3625 size_t len, loff_t *offp)
3627 struct seq_file *m = file->private_data;
3628 struct drm_device *dev = m->private;
3630 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
3633 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3634 size_t len, loff_t *offp)
3636 struct seq_file *m = file->private_data;
3637 struct drm_device *dev = m->private;
3639 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
3642 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3643 size_t len, loff_t *offp)
3645 struct seq_file *m = file->private_data;
3646 struct drm_device *dev = m->private;
3648 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
3651 static const struct file_operations i915_pri_wm_latency_fops = {
3652 .owner = THIS_MODULE,
3653 .open = pri_wm_latency_open,
3655 .llseek = seq_lseek,
3656 .release = single_release,
3657 .write = pri_wm_latency_write
3660 static const struct file_operations i915_spr_wm_latency_fops = {
3661 .owner = THIS_MODULE,
3662 .open = spr_wm_latency_open,
3664 .llseek = seq_lseek,
3665 .release = single_release,
3666 .write = spr_wm_latency_write
3669 static const struct file_operations i915_cur_wm_latency_fops = {
3670 .owner = THIS_MODULE,
3671 .open = cur_wm_latency_open,
3673 .llseek = seq_lseek,
3674 .release = single_release,
3675 .write = cur_wm_latency_write
3679 i915_wedged_get(void *data, u64 *val)
3681 struct drm_device *dev = data;
3682 struct drm_i915_private *dev_priv = dev->dev_private;
3684 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
3690 i915_wedged_set(void *data, u64 val)
3692 struct drm_device *dev = data;
3693 struct drm_i915_private *dev_priv = dev->dev_private;
3695 intel_runtime_pm_get(dev_priv);
3697 i915_handle_error(dev, val,
3698 "Manually setting wedged to %llu", val);
3700 intel_runtime_pm_put(dev_priv);
3705 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3706 i915_wedged_get, i915_wedged_set,
3710 i915_ring_stop_get(void *data, u64 *val)
3712 struct drm_device *dev = data;
3713 struct drm_i915_private *dev_priv = dev->dev_private;
3715 *val = dev_priv->gpu_error.stop_rings;
3721 i915_ring_stop_set(void *data, u64 val)
3723 struct drm_device *dev = data;
3724 struct drm_i915_private *dev_priv = dev->dev_private;
3727 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
3729 ret = mutex_lock_interruptible(&dev->struct_mutex);
3733 dev_priv->gpu_error.stop_rings = val;
3734 mutex_unlock(&dev->struct_mutex);
3739 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
3740 i915_ring_stop_get, i915_ring_stop_set,
3744 i915_ring_missed_irq_get(void *data, u64 *val)
3746 struct drm_device *dev = data;
3747 struct drm_i915_private *dev_priv = dev->dev_private;
3749 *val = dev_priv->gpu_error.missed_irq_rings;
3754 i915_ring_missed_irq_set(void *data, u64 val)
3756 struct drm_device *dev = data;
3757 struct drm_i915_private *dev_priv = dev->dev_private;
3760 /* Lock against concurrent debugfs callers */
3761 ret = mutex_lock_interruptible(&dev->struct_mutex);
3764 dev_priv->gpu_error.missed_irq_rings = val;
3765 mutex_unlock(&dev->struct_mutex);
3770 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
3771 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
3775 i915_ring_test_irq_get(void *data, u64 *val)
3777 struct drm_device *dev = data;
3778 struct drm_i915_private *dev_priv = dev->dev_private;
3780 *val = dev_priv->gpu_error.test_irq_rings;
3786 i915_ring_test_irq_set(void *data, u64 val)
3788 struct drm_device *dev = data;
3789 struct drm_i915_private *dev_priv = dev->dev_private;
3792 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
3794 /* Lock against concurrent debugfs callers */
3795 ret = mutex_lock_interruptible(&dev->struct_mutex);
3799 dev_priv->gpu_error.test_irq_rings = val;
3800 mutex_unlock(&dev->struct_mutex);
3805 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
3806 i915_ring_test_irq_get, i915_ring_test_irq_set,
3809 #define DROP_UNBOUND 0x1
3810 #define DROP_BOUND 0x2
3811 #define DROP_RETIRE 0x4
3812 #define DROP_ACTIVE 0x8
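/*
 * i915_gem_drop_caches takes a bitmask of these DROP_* flags:
 * DROP_ACTIVE idles the GPU first, DROP_RETIRE retires completed
 * requests, and DROP_BOUND/DROP_UNBOUND shrink the bound/unbound
 * object lists; DROP_ALL (below) combines them all.
 */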
3813 #define DROP_ALL (DROP_UNBOUND | \
3818 i915_drop_caches_get(void *data, u64 *val)
3826 i915_drop_caches_set(void *data, u64 val)
3828 struct drm_device *dev = data;
3829 struct drm_i915_private *dev_priv = dev->dev_private;
3832 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
3834 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3835 * on ioctls on -EAGAIN. */
3836 ret = mutex_lock_interruptible(&dev->struct_mutex);
3840 if (val & DROP_ACTIVE) {
3841 ret = i915_gpu_idle(dev);
3846 if (val & (DROP_RETIRE | DROP_ACTIVE))
3847 i915_gem_retire_requests(dev);
3849 if (val & DROP_BOUND)
3850 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
3852 if (val & DROP_UNBOUND)
3853 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
3856 mutex_unlock(&dev->struct_mutex);
3861 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3862 i915_drop_caches_get, i915_drop_caches_set,
3866 i915_max_freq_get(void *data, u64 *val)
3868 struct drm_device *dev = data;
3869 struct drm_i915_private *dev_priv = dev->dev_private;
3872 if (INTEL_INFO(dev)->gen < 6)
3875 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3877 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3881 if (IS_VALLEYVIEW(dev))
3882 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
3884 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3885 mutex_unlock(&dev_priv->rps.hw_lock);
3891 i915_max_freq_set(void *data, u64 val)
3893 struct drm_device *dev = data;
3894 struct drm_i915_private *dev_priv = dev->dev_private;
3895 u32 rp_state_cap, hw_max, hw_min;
3898 if (INTEL_INFO(dev)->gen < 6)
3901 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3903 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
3905 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3910 * Turbo will still be enabled, but won't go above the set value.
3912 if (IS_VALLEYVIEW(dev)) {
3913 val = vlv_freq_opcode(dev_priv, val);
3915 hw_max = dev_priv->rps.max_freq;
3916 hw_min = dev_priv->rps.min_freq;
3918 do_div(val, GT_FREQUENCY_MULTIPLIER);
3920 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3921 hw_max = dev_priv->rps.max_freq;
3922 hw_min = (rp_state_cap >> 16) & 0xff;
3925 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
3926 mutex_unlock(&dev_priv->rps.hw_lock);
3930 dev_priv->rps.max_freq_softlimit = val;
3932 if (IS_VALLEYVIEW(dev))
3933 valleyview_set_rps(dev, val);
3935 gen6_set_rps(dev, val);
3937 mutex_unlock(&dev_priv->rps.hw_lock);
3942 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
3943 i915_max_freq_get, i915_max_freq_set,
3947 i915_min_freq_get(void *data, u64 *val)
3949 struct drm_device *dev = data;
3950 struct drm_i915_private *dev_priv = dev->dev_private;
3953 if (INTEL_INFO(dev)->gen < 6)
3956 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3958 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3962 if (IS_VALLEYVIEW(dev))
3963 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
3965 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3966 mutex_unlock(&dev_priv->rps.hw_lock);
3972 i915_min_freq_set(void *data, u64 val)
3974 struct drm_device *dev = data;
3975 struct drm_i915_private *dev_priv = dev->dev_private;
3976 u32 rp_state_cap, hw_max, hw_min;
3979 if (INTEL_INFO(dev)->gen < 6)
3982 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3984 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
3986 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3991 * Turbo will still be enabled, but won't go below the set value.
3993 if (IS_VALLEYVIEW(dev)) {
3994 val = vlv_freq_opcode(dev_priv, val);
3996 hw_max = dev_priv->rps.max_freq;
3997 hw_min = dev_priv->rps.min_freq;
3999 do_div(val, GT_FREQUENCY_MULTIPLIER);
4001 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4002 hw_max = dev_priv->rps.max_freq;
4003 hw_min = (rp_state_cap >> 16) & 0xff;
4006 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
4007 mutex_unlock(&dev_priv->rps.hw_lock);
4011 dev_priv->rps.min_freq_softlimit = val;
4013 if (IS_VALLEYVIEW(dev))
4014 valleyview_set_rps(dev, val);
4016 gen6_set_rps(dev, val);
4018 mutex_unlock(&dev_priv->rps.hw_lock);
4023 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4024 i915_min_freq_get, i915_min_freq_set,
4028 i915_cache_sharing_get(void *data, u64 *val)
4030 struct drm_device *dev = data;
4031 struct drm_i915_private *dev_priv = dev->dev_private;
4035 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4038 ret = mutex_lock_interruptible(&dev->struct_mutex);
4041 intel_runtime_pm_get(dev_priv);
4043 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4045 intel_runtime_pm_put(dev_priv);
4046 mutex_unlock(&dev_priv->dev->struct_mutex);
4048 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4054 i915_cache_sharing_set(void *data, u64 val)
4056 struct drm_device *dev = data;
4057 struct drm_i915_private *dev_priv = dev->dev_private;
4060 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4066 intel_runtime_pm_get(dev_priv);
4067 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4069 /* Update the cache sharing policy here as well */
4070 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4071 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4072 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4073 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4075 intel_runtime_pm_put(dev_priv);
4079 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4080 i915_cache_sharing_get, i915_cache_sharing_set,
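/*
 * i915_forcewake_user: holding this file open keeps a forcewake
 * reference, preventing the GT from power gating while registers are
 * inspected (gen6+ only).
 */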
4083 static int i915_forcewake_open(struct inode *inode, struct file *file)
4085 struct drm_device *dev = inode->i_private;
4086 struct drm_i915_private *dev_priv = dev->dev_private;
4088 if (INTEL_INFO(dev)->gen < 6)
4091 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4096 static int i915_forcewake_release(struct inode *inode, struct file *file)
4098 struct drm_device *dev = inode->i_private;
4099 struct drm_i915_private *dev_priv = dev->dev_private;
4101 if (INTEL_INFO(dev)->gen < 6)
4104 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4109 static const struct file_operations i915_forcewake_fops = {
4110 .owner = THIS_MODULE,
4111 .open = i915_forcewake_open,
4112 .release = i915_forcewake_release,
4115 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
4117 struct drm_device *dev = minor->dev;
4120 ent = debugfs_create_file("i915_forcewake_user",
4123 &i915_forcewake_fops);
4127 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
4130 static int i915_debugfs_create(struct dentry *root,
4131 struct drm_minor *minor,
4133 const struct file_operations *fops)
4135 struct drm_device *dev = minor->dev;
4138 ent = debugfs_create_file(name,
4145 return drm_add_fake_info_node(minor, ent, fops);
4148 static const struct drm_info_list i915_debugfs_list[] = {
4149 {"i915_capabilities", i915_capabilities, 0},
4150 {"i915_gem_objects", i915_gem_object_info, 0},
4151 {"i915_gem_gtt", i915_gem_gtt_info, 0},
4152 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
4153 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
4154 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
4155 {"i915_gem_stolen", i915_gem_stolen_list_info },
4156 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
4157 {"i915_gem_request", i915_gem_request_info, 0},
4158 {"i915_gem_seqno", i915_gem_seqno_info, 0},
4159 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4160 {"i915_gem_interrupt", i915_interrupt_info, 0},
4161 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
4162 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
4163 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
4164 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
4165 {"i915_frequency_info", i915_frequency_info, 0},
4166 {"i915_drpc_info", i915_drpc_info, 0},
4167 {"i915_emon_status", i915_emon_status, 0},
4168 {"i915_ring_freq_table", i915_ring_freq_table, 0},
4169 {"i915_fbc_status", i915_fbc_status, 0},
4170 {"i915_ips_status", i915_ips_status, 0},
4171 {"i915_sr_status", i915_sr_status, 0},
4172 {"i915_opregion", i915_opregion, 0},
4173 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4174 {"i915_context_status", i915_context_status, 0},
4175 {"i915_dump_lrc", i915_dump_lrc, 0},
4176 {"i915_execlists", i915_execlists, 0},
4177 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
4178 {"i915_swizzle_info", i915_swizzle_info, 0},
4179 {"i915_ppgtt_info", i915_ppgtt_info, 0},
4180 {"i915_llc", i915_llc, 0},
4181 {"i915_edp_psr_status", i915_edp_psr_status, 0},
4182 {"i915_sink_crc_eDP1", i915_sink_crc, 0},
4183 {"i915_energy_uJ", i915_energy_uJ, 0},
4184 {"i915_pc8_status", i915_pc8_status, 0},
4185 {"i915_power_domain_info", i915_power_domain_info, 0},
4186 {"i915_display_info", i915_display_info, 0},
4187 {"i915_semaphore_status", i915_semaphore_status, 0},
4188 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4189 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4190 {"i915_wa_registers", i915_wa_registers, 0},
4192 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4194 static const struct i915_debugfs_files {
4196 const struct file_operations *fops;
4197 } i915_debugfs_files[] = {
4198 {"i915_wedged", &i915_wedged_fops},
4199 {"i915_max_freq", &i915_max_freq_fops},
4200 {"i915_min_freq", &i915_min_freq_fops},
4201 {"i915_cache_sharing", &i915_cache_sharing_fops},
4202 {"i915_ring_stop", &i915_ring_stop_fops},
4203 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4204 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
4205 {"i915_gem_drop_caches", &i915_drop_caches_fops},
4206 {"i915_error_state", &i915_error_state_fops},
4207 {"i915_next_seqno", &i915_next_seqno_fops},
4208 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4209 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4210 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4211 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4212 {"i915_fbc_false_color", &i915_fbc_fc_fops},
4215 void intel_display_crc_init(struct drm_device *dev)
4217 struct drm_i915_private *dev_priv = dev->dev_private;
4220 for_each_pipe(dev_priv, pipe) {
4221 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
4223 pipe_crc->opened = false;
4224 spin_lock_init(&pipe_crc->lock);
4225 init_waitqueue_head(&pipe_crc->wq);
4229 int i915_debugfs_init(struct drm_minor *minor)
4233 ret = i915_forcewake_create(minor->debugfs_root, minor);
4237 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4238 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
4243 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4244 ret = i915_debugfs_create(minor->debugfs_root, minor,
4245 i915_debugfs_files[i].name,
4246 i915_debugfs_files[i].fops);
4251 return drm_debugfs_create_files(i915_debugfs_list,
4252 I915_DEBUGFS_ENTRIES,
4253 minor->debugfs_root, minor);
4256 void i915_debugfs_cleanup(struct drm_minor *minor)
4260 drm_debugfs_remove_files(i915_debugfs_list,
4261 I915_DEBUGFS_ENTRIES, minor);
4263 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
4266 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4267 struct drm_info_list *info_list =
4268 (struct drm_info_list *)&i915_pipe_crc_data[i];
4270 drm_debugfs_remove_files(info_list, 1, minor);
4273 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4274 struct drm_info_list *info_list =
4275 (struct drm_info_list *) i915_debugfs_files[i].fops;
4277 drm_debugfs_remove_files(info_list, 1, minor);