Linux-libre 5.3.12-gnu
librecmc/linux-libre.git: arch/powerpc/perf/core-fsl-emb.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance event support - Freescale Embedded Performance Monitor
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

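/*
 * Per-CPU bookkeeping: which event occupies which counter, whether the
 * PMU is currently disabled, and whether the PMCs have already been
 * enabled (via ppc_enable_pmcs()) on this CPU.
 */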
struct cpu_hw_events {
        int n_events;
        int disabled;
        u8  pmcs_enabled;
        struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct fsl_emb_pmu *ppmu;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
        return (regs->softe & IRQS_DISABLED);
#else
        return 0;
#endif
}
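
/*
 * On 32-bit parts the soft-disable check above is always false, so
 * perf_event_interrupt() runs in ordinary IRQ context there.
 */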

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 0:
                val = mfpmr(PMRN_PMC0);
                break;
        case 1:
                val = mfpmr(PMRN_PMC1);
                break;
        case 2:
                val = mfpmr(PMRN_PMC2);
                break;
        case 3:
                val = mfpmr(PMRN_PMC3);
                break;
        case 4:
                val = mfpmr(PMRN_PMC4);
                break;
        case 5:
                val = mfpmr(PMRN_PMC5);
                break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMC0, val);
                break;
        case 1:
                mtpmr(PMRN_PMC1, val);
                break;
        case 2:
                mtpmr(PMRN_PMC2, val);
                break;
        case 3:
                mtpmr(PMRN_PMC3, val);
                break;
        case 4:
                mtpmr(PMRN_PMC4, val);
                break;
        case 5:
                mtpmr(PMRN_PMC5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }

        isync();
}
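
/*
 * The isync() after each mtpmr here and in the PMLCA/PMLCB writers
 * below is context-synchronizing: it makes sure the PMR update has
 * taken effect before any subsequent instruction executes.
 */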

/*
 * Write one local control A register
 */
static void write_pmlca(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCA0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, val);
                break;
        case 4:
                mtpmr(PMRN_PMLCA4, val);
                break;
        case 5:
                mtpmr(PMRN_PMLCA5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
        }

        isync();
}

/*
 * Write one local control B register
 */
static void write_pmlcb(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCB0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCB1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCB2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCB3, val);
                break;
        case 4:
                mtpmr(PMRN_PMLCB4, val);
                break;
        case 5:
                mtpmr(PMRN_PMLCB5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
        }

        isync();
}

static void fsl_emb_pmu_read(struct perf_event *event)
{
        s64 val, delta, prev;

        if (event->hw.state & PERF_HES_STOPPED)
                return;

        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs: the interrupt handler
         * may update prev_count underneath us, so retry the cmpxchg
         * until our update goes through consistently.
         */
        do {
                prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
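        /*
         * Example: if prev = 0xfffffffe and the counter has wrapped to
         * val = 3, then (val - prev) & 0xffffffff = 5, the true number
         * of events despite the 32-bit wrap.
         */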
        local64_add(delta, &event->count);
        local64_sub(delta, &event->hw.period_left);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = this_cpu_ptr(&cpu_hw_events);

        if (!cpuhw->disabled) {
                cpuhw->disabled = 1;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        ppc_enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                if (atomic_read(&num_events)) {
                        /*
                         * Set the 'freeze all counters' bit, and disable
                         * interrupts.  The barrier is to make sure the
                         * mtpmr has been executed and the PMU has frozen
                         * the events before we return.
                         */

                        mtpmr(PMRN_PMGC0, PMGC0_FAC);
                        isync();
                }
        }
        local_irq_restore(flags);
}

/*
 * Re-enable all events.  If we were previously disabled and events
 * were added in the meantime, put the new config on the PMU.
 */
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = this_cpu_ptr(&cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;

        cpuhw->disabled = 0;
        ppc_set_pmu_inuse(cpuhw->n_events != 0);

        if (cpuhw->n_events > 0) {
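                /*
                 * Per the e500 PMGC0 bit definitions: PMIE raises a PM
                 * interrupt on counter overflow, and FCECE freezes the
                 * counters when an enabled condition or event occurs;
                 * the interrupt handler unfreezes them when it is done.
                 */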
                mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
                isync();
        }

 out:
        local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *ctrs[])
{
        int n = 0;
        struct perf_event *event;

        if (!is_software_event(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                n++;
        }
        for_each_sibling_event(event, group) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = event;
                        n++;
                }
        }
        return n;
}

/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw;
        int ret = -EAGAIN;
        int num_counters = ppmu->n_counter;
        u64 val;
        int i;

        perf_pmu_disable(event->pmu);
        cpuhw = &get_cpu_var(cpu_hw_events);

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
                num_counters = ppmu->n_restricted;

        /*
         * Allocate counters from top-down, so that restricted-capable
         * counters are kept free as long as possible.
         */
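        /*
         * Restricted events may only use counters 0..n_restricted-1
         * (num_counters is clamped above), so filling from the top
         * keeps those low counters free for them.
         */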
        for (i = num_counters - 1; i >= 0; i--) {
                if (cpuhw->event[i])
                        continue;

                break;
        }

        if (i < 0)
                goto out;

        event->hw.idx = i;
        cpuhw->event[i] = event;
        ++cpuhw->n_events;

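        /*
         * For a sampling event, start the counter at 0x80000000 - left
         * so that it goes negative (bit 31 set) and raises a PMI after
         * "left" more events.
         */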
        val = 0;
        if (event->hw.sample_period) {
                s64 left = local64_read(&event->hw.period_left);
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
        local64_set(&event->hw.prev_count, val);

        if (unlikely(!(flags & PERF_EF_START))) {
                event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
                val = 0;
        } else {
                event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE);
        }

        write_pmc(i, val);
        perf_event_update_userpage(event);

        write_pmlcb(i, event->hw.config >> 32);
        write_pmlca(i, event->hw.config_base);

        ret = 0;
 out:
        put_cpu_var(cpu_hw_events);
        perf_pmu_enable(event->pmu);
        return ret;
}

/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;

        perf_pmu_disable(event->pmu);
        if (i < 0)
                goto out;

        fsl_emb_pmu_read(event);

        cpuhw = &get_cpu_var(cpu_hw_events);

        WARN_ON(event != cpuhw->event[event->hw.idx]);

        write_pmlca(i, 0);
        write_pmlcb(i, 0);
        write_pmc(i, 0);

        cpuhw->event[i] = NULL;
        event->hw.idx = -1;

        /*
         * TODO: if at least one restricted event exists, and we
         * just freed up a non-restricted-capable counter, and
         * there is a restricted-capable counter occupied by
         * a non-restricted event, migrate that event to the
         * vacated counter.
         */

        cpuhw->n_events--;

 out:
        perf_pmu_enable(event->pmu);
        put_cpu_var(cpu_hw_events);
}

static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
        unsigned long flags;
        unsigned long val;
        s64 left;

        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;

        if (!(event->hw.state & PERF_HES_STOPPED))
                return;

        if (ef_flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

        local_irq_save(flags);
        perf_pmu_disable(event->pmu);

        event->hw.state = 0;
        left = local64_read(&event->hw.period_left);
        val = 0;
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);

        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
}

static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
        unsigned long flags;

        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;

        if (event->hw.state & PERF_HES_STOPPED)
                return;

        local_irq_save(flags);
        perf_pmu_disable(event->pmu);

        fsl_emb_pmu_read(event);
        event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        write_pmc(event->hw.idx, 0);

        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
}

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
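        /*
         * Fast path: just drop a reference unless we are the last one.
         * Only the final reference takes the mutex, so the release
         * cannot race with a concurrent reserve_pmc_hardware().
         */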
        if (!atomic_add_unless(&num_events, -1, 1)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_events) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
        unsigned long type, op, result;
        int ev;

        if (!ppmu->cache_events)
                return -EINVAL;

        /* unpack config */
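        /*
         * Per the perf ABI, cache event configs are packed as:
         * byte 0 = cache id, byte 1 = op id, byte 2 = result id.
         */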
        type = config & 0xff;
        op = (config >> 8) & 0xff;
        result = (config >> 16) & 0xff;

        if (type >= PERF_COUNT_HW_CACHE_MAX ||
            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ev = (*ppmu->cache_events)[type][op][result];
        if (ev == 0)
                return -EOPNOTSUPP;
        if (ev == -1)
                return -EINVAL;
        *eventp = ev;
        return 0;
}

static int fsl_emb_pmu_event_init(struct perf_event *event)
{
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
        int n;
        int err;
        int num_restricted;
        int i;

        if (ppmu->n_counter > MAX_HWEVENTS) {
                WARN(1, "No. of perf counters (%d) is higher than max array size (%d)\n",
                        ppmu->n_counter, MAX_HWEVENTS);
                ppmu->n_counter = MAX_HWEVENTS;
        }

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
                        return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;

        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
                        return err;
                break;

        case PERF_TYPE_RAW:
                ev = event->attr.config;
                break;

        default:
                return -ENOENT;
        }

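        /*
         * xlate_event() maps the event code to the board-specific
         * config word; FSL_EMB_EVENT_VALID is clear if the event is
         * not supported on this PMU.
         */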
        event->hw.config = ppmu->xlate_event(ev);
        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
                return -EINVAL;

        /*
         * If this is in a group, check if it can go on with all the
         * other hardware events in the group.  We assume the event
         * hasn't been linked into its leader's sibling list at this point.
         */
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
                if (n < 0)
                        return -EINVAL;
        }

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
                num_restricted = 0;
                for (i = 0; i < n; i++) {
                        if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
                                num_restricted++;
                }

                if (num_restricted >= ppmu->n_restricted)
                        return -EINVAL;
        }

        event->hw.idx = -1;

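        /*
         * Per the e500 PMLCA bit definitions: CE enables the overflow
         * condition (interrupt) for this counter, and FCM1 freezes it
         * while MSR[PMM] is set, which the interrupt handler uses to
         * hold counters while it runs.  FCU/FCS below freeze counting
         * in user/supervisor state for exclude_user/exclude_kernel.
         */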
        event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
                                (u32)((ev << 16) & PMLCA_EVENT_MASK);

        if (event->attr.exclude_user)
                event->hw.config_base |= PMLCA_FCU;
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
                return -ENOTSUPP;

        event->hw.last_period = event->hw.sample_period;
        local64_set(&event->hw.period_left, event->hw.last_period);

        /*
         * See if we need to reserve the PMU.
         * If no events are currently in use, then we have to take a
         * mutex to ensure that we don't race with another task doing
         * reserve_pmc_hardware or release_pmc_hardware.
         */
        err = 0;
        if (!atomic_inc_not_zero(&num_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_events) == 0 &&
                    reserve_pmc_hardware(perf_event_interrupt))
                        err = -EBUSY;
                else
                        atomic_inc(&num_events);
                mutex_unlock(&pmc_reserve_mutex);

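                /*
                 * First user of the PMU: start with all counters
                 * frozen (FAC = "freeze all counters") until
                 * pmu_enable() reprograms PMGC0.
                 */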
                mtpmr(PMRN_PMGC0, PMGC0_FAC);
                isync();
        }
        event->destroy = hw_perf_event_destroy;

        return err;
}

static struct pmu fsl_emb_pmu = {
        .pmu_enable     = fsl_emb_pmu_enable,
        .pmu_disable    = fsl_emb_pmu_disable,
        .event_init     = fsl_emb_pmu_event_init,
        .add            = fsl_emb_pmu_add,
        .del            = fsl_emb_pmu_del,
        .start          = fsl_emb_pmu_start,
        .stop           = fsl_emb_pmu_stop,
        .read           = fsl_emb_pmu_read,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs)
{
        u64 period = event->hw.sample_period;
        s64 prev, delta, left;
        int record = 0;

        if (event->hw.state & PERF_HES_STOPPED) {
                write_pmc(event->hw.idx, 0);
                return;
        }

        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);

        /*
         * See if the total period for this event has expired,
         * and update for the next period.
         */
        val = 0;
        left = local64_read(&event->hw.period_left) - delta;
        if (period) {
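                /*
                 * If we fell behind by more than one full period,
                 * resynchronize by starting a fresh period from here.
                 */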
                if (left <= 0) {
                        left += period;
                        if (left <= 0)
                                left = period;
                        record = 1;
                        event->hw.last_period = event->hw.sample_period;
                }
                if (left < 0x80000000LL)
                        val = 0x80000000LL - left;
        }

        write_pmc(event->hw.idx, val);
        local64_set(&event->hw.prev_count, val);
        local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);

        /*
         * Finally record data if requested.
         */
        if (record) {
                struct perf_sample_data data;

                perf_sample_data_init(&data, 0, event->hw.last_period);

                if (perf_event_overflow(event, &data, regs))
                        fsl_emb_pmu_stop(event, 0);
        }
}

static void perf_event_interrupt(struct pt_regs *regs)
{
        int i;
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        unsigned long val;
        int found = 0;
        int nmi;

        nmi = perf_intr_is_nmi(regs);
        if (nmi)
                nmi_enter();
        else
                irq_enter();

        for (i = 0; i < ppmu->n_counter; ++i) {
                event = cpuhw->event[i];

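                /*
                 * A counter that reads negative as an int (bit 31 set)
                 * has overflowed.
                 */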
                val = read_pmc(i);
                if ((int)val < 0) {
                        if (event) {
                                /* event has overflowed */
                                found = 1;
                                record_and_restart(event, val, regs);
                        } else {
                                /*
                                 * Disabled counter is negative,
                                 * reset it just in case.
                                 */
                                write_pmc(i, 0);
                        }
                }
        }

        /* PMM will keep counters frozen until we return from the interrupt. */
        mtmsr(mfmsr() | MSR_PMM);
        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
        isync();

        if (nmi)
                nmi_exit();
        else
                irq_exit();
}

void hw_perf_event_setup(int cpu)
{
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

        memset(cpuhw, 0, sizeof(*cpuhw));
}

int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
        if (ppmu)
                return -EBUSY;          /* something's already registered */

        ppmu = pmu;
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);

        perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);

        return 0;
}