/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>
/*
 * Constraints data
 */
struct bp_cpuinfo {
        /* Number of pinned cpu breakpoints in a cpu */
        unsigned int    cpu_pinned;
        /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
        unsigned int    *tsk_pinned;
        /* Number of non-pinned cpu/task breakpoints in a cpu */
        unsigned int    flexible; /* XXX: placeholder, see fetch_this_slot() */
};

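/*
 * Per-cpu constraint bookkeeping, one bp_cpuinfo per slot type, and the
 * number of hardware slots the architecture provides for each type.
 */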
static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
        return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

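/*
 * Number of debug register slots this breakpoint consumes.  The weak
 * default is one slot per breakpoint; an architecture may override it.
 */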
__weak int hw_breakpoint_weight(struct perf_event *bp)
{
        return 1;
}

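/* Map a breakpoint to its slot type: data (read/write) or instruction. */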
static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
        if (bp->attr.bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;

        return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
        unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
        int i;

        for (i = nr_slots[type] - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
        struct task_struct *tsk = bp->hw.target;
        struct perf_event *iter;
        int count = 0;

        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                if (iter->hw.target == tsk &&
                    find_slot_idx(iter) == type &&
                    (iter->cpu < 0 || cpu == iter->cpu))
                        count += hw_breakpoint_weight(iter);
        }

        return count;
}

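/*
 * Return the set of CPUs a breakpoint is constrained to: a single cpu for
 * cpu-bound events, all possible CPUs for task-bound events.
 */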
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
        if (bp->cpu >= 0)
                return cpumask_of(bp->cpu);
        return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
{
        const struct cpumask *cpumask = cpumask_of_bp(bp);
        int cpu;

        for_each_cpu(cpu, cpumask) {
                struct bp_cpuinfo *info = get_bp_info(cpu, type);
                int nr;

                nr = info->cpu_pinned;
                if (!bp->hw.target)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(cpu, bp, type);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = info->flexible;
                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
        slots->pinned += weight;
}

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
                                enum bp_type_idx type, int weight)
{
        unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
        int old_idx, new_idx;

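        /*
         * tsk_pinned[] is a histogram: tsk_pinned[n] counts the tasks that
         * own n + 1 breakpoints of this type.  Move this task from its old
         * bucket to the new one.
         */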
        old_idx = task_bp_pinned(cpu, bp, type) - 1;
        new_idx = old_idx + weight;

        if (old_idx >= 0)
                tsk_pinned[old_idx]--;
        if (new_idx >= 0)
                tsk_pinned[new_idx]++;
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
{
        const struct cpumask *cpumask = cpumask_of_bp(bp);
        int cpu;

        if (!enable)
                weight = -weight;

        /* Pinned counter cpu profiling */
        if (!bp->hw.target) {
                get_bp_info(bp->cpu, type)->cpu_pinned += weight;
                return;
        }

        /* Pinned counter task profiling */
        for_each_cpu(cpu, cpumask)
                toggle_bp_task_slot(bp, cpu, type, weight);

        if (enable)
                list_add_tail(&bp->hw.bp_list, &bp_task_head);
        else
                list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
        /*
         * A weak stub function here for those archs that don't define
         * it inside arch/.../kernel/hw_breakpoint.c
         */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
        int weight;

        /* We couldn't initialize breakpoint constraints on boot */
        if (!constraints_initialized)
                return -ENOMEM;

        /* Basic checks */
        if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
            bp->attr.bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);

        fetch_bp_busy_slots(&slots, bp, type);
        /*
         * Simulate the addition of this breakpoint to the constraints
         * and see the result.
         */
        fetch_this_slot(&slots, weight);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) > nr_slots[type])
                return -ENOSPC;

        toggle_bp_slot(bp, true, type, weight);

        return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);

        ret = __reserve_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
        enum bp_type_idx type;
        int weight;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        arch_unregister_hw_breakpoint(bp);
        __release_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp);

        return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = arch_validate_hwbkpt_settings(bp);
        if (ret)
                return ret;

        if (arch_check_bp_in_kernelspace(bp)) {
                if (bp->attr.exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

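/*
 * Reserve a constraint slot for the breakpoint, then validate its settings;
 * the slot is released again if validation fails.
 */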
int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        ret = validate_hw_breakpoint(bp);

        /* if validate_hw_breakpoint() fails then release bp slot */
        if (ret)
                release_bp_slot(bp);

        return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed through to the @triggered overflow handler
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            void *context,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
                                                context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        /*
         * modify_user_hw_breakpoint can be invoked with IRQs disabled, in which
         * case it is not possible to raise IPIs that invoke __perf_event_disable.
         * So call that function directly after making sure we are targeting the
         * current task.
         */
        if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
                __perf_event_disable(bp);
        else
                perf_event_disable(bp);

        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len = attr->bp_len;
        bp->attr.disabled = 1;

        if (!attr->disabled) {
                int err = validate_hw_breakpoint(bp);

                if (err)
                        return err;

                perf_event_enable(bp);
                bp->attr.disabled = 0;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed through to the @triggered overflow handler
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            void *context)
{
        struct perf_event * __percpu *cpu_events, *bp;
        long err = 0;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                bp = perf_event_create_kernel_counter(attr, cpu, NULL,
                                                      triggered, context);
                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        break;
                }

                per_cpu(*cpu_events, cpu) = bp;
        }
        put_online_cpus();

        if (likely(!err))
                return cpu_events;

        unregister_wide_hw_breakpoint(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;

        for_each_possible_cpu(cpu)
                unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
        release_bp_slot(event);
}

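/*
 * PMU event_init callback: claim only PERF_TYPE_BREAKPOINT events, reject
 * branch sampling and reserve a constraint slot for the breakpoint.
 */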
static int hw_breakpoint_event_init(struct perf_event *bp)
{
        int err;

        if (bp->attr.type != PERF_TYPE_BREAKPOINT)
                return -ENOENT;

        /*
         * no branch sampling for breakpoint events
         */
        if (has_branch_stack(bp))
                return -EOPNOTSUPP;

        err = register_perf_hw_breakpoint(bp);
        if (err)
                return err;

        bp->destroy = bp_perf_event_destroy;

        return 0;
}

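/*
 * Install the breakpoint into the CPU's debug registers via the arch hook.
 * Unless PERF_EF_START is set, the event is added in the stopped state.
 */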
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
        if (!(flags & PERF_EF_START))
                bp->hw.state = PERF_HES_STOPPED;

        if (is_sampling_event(bp)) {
                bp->hw.last_period = bp->hw.sample_period;
                perf_swevent_set_period(bp);
        }

        return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
        arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
        bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
        bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
        .task_ctx_nr    = perf_sw_context, /* could eventually get its own */

        .event_init     = hw_breakpoint_event_init,
        .add            = hw_breakpoint_add,
        .del            = hw_breakpoint_del,
        .start          = hw_breakpoint_start,
        .stop           = hw_breakpoint_stop,
        .read           = hw_breakpoint_pmu_read,
};

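/*
 * Query the architecture for the number of slots of each type, allocate the
 * per-cpu tsk_pinned histograms, then register the breakpoint PMU and the
 * die notifier for breakpoint exceptions.
 */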
int __init init_hw_breakpoint(void)
{
        int cpu, err_cpu;
        int i;

        for (i = 0; i < TYPE_MAX; i++)
                nr_slots[i] = hw_breakpoint_slots(i);

        for_each_possible_cpu(cpu) {
                for (i = 0; i < TYPE_MAX; i++) {
                        struct bp_cpuinfo *info = get_bp_info(cpu, i);

                        info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
                                                        GFP_KERNEL);
                        if (!info->tsk_pinned)
                                goto err_alloc;
                }
        }

        constraints_initialized = 1;

        perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

        return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(get_bp_info(err_cpu, i)->tsk_pinned);
                if (err_cpu == cpu)
                        break;
        }

        return -ENOMEM;
}