/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
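
/*
 * Worked example: with FREQUENCY at 200 Hz, NSEC_PER_SEC / FREQUENCY is
 * 1,000,000,000 / 200 = 5,000,000 ns, so PERIOD = max(10000, 5000000) =
 * 5 ms between samples.
 */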
#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}
static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}
static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}
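
/*
 * For example, an engine busy event selects bit I915_SAMPLE_BUSY, while
 * global events are packed after the engine sample bits: assuming
 * I915_PMU_SAMPLE_BITS is 4, I915_PMU_ACTUAL_FREQUENCY (__I915_PMU_OTHER(0))
 * lands on bit 16 and I915_PMU_INTERRUPTS (__I915_PMU_OTHER(2)) on bit 18.
 */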
static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
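
/*
 * Illustration of the masking above: with only I915_PMU_INTERRUPTS enabled,
 * its enable bit is masked out and pmu_needs_timer() returns false; with
 * only I915_PMU_ACTUAL_FREQUENCY enabled, the bit survives regardless of
 * gpu_active and the sampling timer keeps running.
 */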
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);
	spin_unlock_irq(&pmu->lock);
}
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);
	spin_unlock_irq(&pmu->lock);
}
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	for_each_engine(engine, i915, id) {
		struct intel_engine_pmu *pmu = &engine->pmu;
		unsigned long flags;
		bool busy;
		u32 val;

		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		spin_lock_irqsave(&engine->uncore->lock, flags);

		val = ENGINE_READ_FW(engine, RING_CTL);
		if (val == 0) /* powerwell off => engine idle */
			goto skip;

		if (val & RING_WAIT)
			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
		if (val & RING_WAIT_SEMAPHORE)
			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

		/*
		 * While waiting on a semaphore or event, MI_MODE reports the
		 * ring as idle. However, previously using the seqno, and with
		 * execlists sampling, we account for the ring waiting as the
		 * engine being busy. Therefore, we record the sample as being
		 * busy if either waiting or !idle.
		 */
		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
		if (!busy) {
			val = ENGINE_READ_FW(engine, RING_MI_MODE);
			busy = !(val & MODE_IDLE);
		}
		if (busy)
			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);

skip:
		spin_unlock_irqrestore(&engine->uncore->lock, flags);
		intel_engine_pm_put(engine);
	}
}
static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = i915->gt_pm.rps.cur_freq;
		if (intel_gt_pm_get_if_awake(gt)) {
			val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
			val = intel_get_cagf(i915, val);
			intel_gt_pm_put(gt);
		}

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(i915, val),
				period_ns / 1000);
	}

	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
				period_ns / 1000);
	}
}
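
/*
 * Units note: each frequency sample accumulates MHz * (period_ns / 1000),
 * i.e. MHz * us. Dividing the accumulated value by USEC_PER_SEC in
 * __i915_pmu_event_read() therefore yields MHz * seconds, which perf turns
 * back into an average MHz rate over the measured interval.
 */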
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}
static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}
static void engine_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915,
					  engine_event_class(event),
					  engine_event_instance(event));
	if (WARN_ON_ONCE(!engine))
		return;

	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
	    intel_engine_supports_stats(engine))
		intel_disable_engine_stats(engine);
}
static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);

	if (is_engine_event(event))
		engine_event_destroy(event);
}
static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		/* Fall-through. */
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;
	u8 sample;
	int ret;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	sample = engine_event_sample(event);
	ret = engine_event_status(engine, sample);
	if (ret)
		return ret;

	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
		ret = intel_enable_engine_stats(engine);

	return ret;
}
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}
static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(i915,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);

	return val;
}
static u64 get_rc6(struct intel_gt *gt)
{
#if IS_ENABLED(CONFIG_PM)
	struct drm_i915_private *i915 = gt->i915;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_pmu *pmu = &i915->pmu;
	intel_wakeref_t wakeref;
	unsigned long flags;
	u64 val;

	wakeref = intel_runtime_pm_get_if_in_use(rpm);
	if (wakeref) {
		val = __get_rc6(gt);
		intel_runtime_pm_put(rpm, wakeref);

		/*
		 * If we are coming back from being runtime suspended we must
		 * be careful not to report a larger value than returned
		 * previously.
		 */
		spin_lock_irqsave(&pmu->lock, flags);

		if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
			pmu->sample[__I915_SAMPLE_RC6].cur = val;
		} else {
			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		}

		spin_unlock_irqrestore(&pmu->lock, flags);
	} else {
		struct device *kdev = rpm->kdev;

		/*
		 * We are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		spin_lock_irqsave(&pmu->lock, flags);

		/*
		 * Even though intel_runtime_pm_get_if_in_use above failed to
		 * get the runtime PM reference we cannot assume we are in
		 * runtime suspend, since we can either: a) race with coming
		 * out of it before we took the power.lock, or b) be in other
		 * states than suspended which can bring us here.
		 *
		 * We need to double-check that we are indeed currently runtime
		 * suspended and if not we cannot do better than report the last
		 * known RC6 value.
		 */
		if (pm_runtime_status_suspended(kdev)) {
			val = pm_runtime_suspended_time(kdev);

			if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
				pmu->suspended_time_last = val;

			val -= pmu->suspended_time_last;
			val += pmu->sample[__I915_SAMPLE_RC6].cur;

			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
		} else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		} else {
			val = pmu->sample[__I915_SAMPLE_RC6].cur;
		}

		spin_unlock_irqrestore(&pmu->lock, flags);
	}

	return val;
#else
	return __get_rc6(gt);
#endif
}
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		}
	}

	return val;
}
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
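
/*
 * Note on the retry loop above: if another reader updates prev_count
 * between our local64_read() and local64_cmpxchg(), the cmpxchg fails and
 * we retry with the fresh value, so each delta is accounted exactly once.
 */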
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);
	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}
static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};
static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)
static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};
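
/*
 * With the group above registered, the format description is expected to
 * surface to userspace along these lines (illustrative path):
 *
 *   /sys/bus/event_source/devices/i915/format/i915_eventid -> "config:0-20"
 */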
struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};
static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}
static struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	/* Patch in attrs at runtime. */
};
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}
static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};
static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL,
};
#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}
static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}
static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}
	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}
	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}
static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(i915_pmu_events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	i915_pmu_events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &pmu->node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
	cpuhp_remove_multi_state(cpuhp_slot);
}
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		dev_info(i915->drm.dev, "PMU not supported for this GPU.");
		return;
	}

	i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
	if (!i915_pmu_events_attr_group.attrs) {
		ret = -ENOMEM;
		goto err;
	}

	pmu->base.attr_groups = i915_pmu_attr_groups;
	pmu->base.task_ctx_nr = perf_invalid_context;
	pmu->base.event_init = i915_pmu_event_init;
	pmu->base.add = i915_pmu_event_add;
	pmu->base.del = i915_pmu_event_del;
	pmu->base.start = i915_pmu_event_start;
	pmu->base.stop = i915_pmu_event_stop;
	pmu->base.read = i915_pmu_event_read;
	pmu->base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;

	ret = perf_pmu_register(&pmu->base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	WARN_ON(pmu->enable);

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
}
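
/*
 * Userspace usage sketch (not part of the driver; assumes the PMU above
 * registered under the name "i915" and that the config value comes from
 * include/uapi/drm/i915_drm.h). Roughly equivalent to
 * "perf stat -e i915/rc6-residency/ -a sleep 1":
 *
 *	#include <linux/perf_event.h>
 *	#include <drm/i915_drm.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr = {};
 *		uint64_t count;
 *		int fd, type;
 *		FILE *f;
 *
 *		// The dynamic PMU type id is published in sysfs at
 *		// perf_pmu_register() time.
 *		f = fopen("/sys/bus/event_source/devices/i915/type", "r");
 *		if (!f || fscanf(f, "%d", &type) != 1)
 *			return 1;
 *		fclose(f);
 *
 *		attr.type = type;
 *		attr.size = sizeof(attr);
 *		attr.config = I915_PMU_RC6_RESIDENCY;
 *
 *		// System-wide event (pid = -1) on a CPU from the PMU's
 *		// advertised cpumask (CPU 0 here).
 *		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *		if (fd < 0)
 *			return 1;
 *
 *		sleep(1);
 *		read(fd, &count, sizeof(count));
 *		printf("rc6 residency: %llu ns\n", (unsigned long long)count);
 *		return 0;
 *	}
 */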