// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016 Freescale Semiconductor, Inc.
 */
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

/* The event ID (Counter Select Value) is programmed into bits 31:24. */
#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFF << CNTL_CSV_SHIFT)
#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu",},
	{ .compatible = "fsl,imx8m-ddr-pmu",},
	{ /* sentinel */ }
};
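/*
 * Illustrative device tree node for this PMU (a sketch only; the unit
 * address, register size and interrupt specifier below are placeholders,
 * not taken from a real SoC dtsi). The driver maps MMIO resource 0 and
 * requests interrupt 0 of the node:
 *
 *	ddr-pmu@5c020000 {
 *		compatible = "fsl,imx8-ddr-pmu";
 *		reg = <0x5c020000 0x10000>;
 *		interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */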
struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	int irq;
	int id;
};
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};
static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)
static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	NULL,
};
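/*
 * Usage sketch (not part of the original source): once registered, the PMU
 * appears under /sys/bus/event_source/devices/ as "imx8_ddr<N>" (from
 * DDR_PERF_DEV_NAME plus the IDA id), and the events above can be counted
 * system-wide with perf, e.g.:
 *
 *	perf stat -a -e imx8_ddr0/read-cycles/,imx8_ddr0/write-cycles/ sleep 1
 *
 * The instance name "imx8_ddr0" assumes this is the first probed instance.
 */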
static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0.
	 * The cycles counter is dedicated to the cycle event and
	 * can't be used for the other events.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;

		return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}
static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
}
static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when an hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}
static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	int counter = hwc->idx;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = ddr_perf_read_counter(pmu, counter);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count);

	/* The hardware counters are 32 bits wide; mask off the wraparound. */
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * Must disable first, then enable again, otherwise the
		 * cycle counter will not work if the previous state was
		 * enabled.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		writel(0, pmu->base + reg);
	}
}
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}
static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}
static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}
static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;
}
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* Enable the cycle counter if the cycle event is not in the active event list */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
					EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER,
					true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
					EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER,
					false);
}
static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add         = ddr_perf_event_add,
			.del         = ddr_perf_event_del,
			.start       = ddr_perf_event_start,
			.stop        = ddr_perf_event_stop,
			.read        = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}
static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event, *cycle_event = NULL;

	/* All counters will stop if the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				false);
	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);

		if (event->hw.idx == EVENT_CYCLES_COUNTER)
			cycle_event = event;
	}

	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				true);
	if (cycle_event)
		ddr_perf_event_update(cycle_event);

	return IRQ_HANDLED;
}
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}
static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name)
		return -ENOMEM;

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto ddr_perf_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	if (pmu->cpuhp_state)
		cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);

	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}
static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}
static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name   = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");