2 #ifndef _LINUX_KERNEL_TRACE_H
3 #define _LINUX_KERNEL_TRACE_H
6 #include <linux/atomic.h>
7 #include <linux/sched.h>
8 #include <linux/clocksource.h>
9 #include <linux/ring_buffer.h>
10 #include <linux/mmiotrace.h>
11 #include <linux/tracepoint.h>
12 #include <linux/ftrace.h>
13 #include <linux/hw_breakpoint.h>
14 #include <linux/trace_seq.h>
15 #include <linux/trace_events.h>
16 #include <linux/compiler.h>
19 #ifdef CONFIG_FTRACE_SYSCALLS
20 #include <asm/unistd.h> /* For NR_SYSCALLS */
21 #include <asm/syscall.h> /* some archs define it here */
25 __TRACE_FIRST_TYPE = 0,
48 #define __field(type, item) type item;
51 #define __field_struct(type, item) __field(type, item)
54 #define __field_desc(type, container, item)
57 #define __array(type, item, size) type item[size];
60 #define __array_desc(type, container, item, size)
62 #undef __dynamic_array
63 #define __dynamic_array(type, item) type item[];
66 #define F_STRUCT(args...) args
69 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
70 struct struct_name { \
71 struct trace_entry ent; \
75 #undef FTRACE_ENTRY_DUP
76 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
78 #undef FTRACE_ENTRY_REG
79 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
81 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
84 #undef FTRACE_ENTRY_PACKED
85 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
87 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
90 #include "trace_entries.h"
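/*
 * For illustration only: an FTRACE_ENTRY() invocation in trace_entries.h
 * along the lines of
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...)
 *
 * expands, with the definitions above, to roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */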
93 * syscalls are special, and need special handling; this is why
94 * they are not included in trace_entries.h
96 struct syscall_trace_enter {
97 struct trace_entry ent;
102 struct syscall_trace_exit {
103 struct trace_entry ent;
108 struct kprobe_trace_entry_head {
109 struct trace_entry ent;
113 struct kretprobe_trace_entry_head {
114 struct trace_entry ent;
116 unsigned long ret_ip;
120 * trace_flag_type is an enumeration that holds different
121 * states when a trace occurs. These are:
122 * IRQS_OFF - interrupts were disabled
123 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
124 * NEED_RESCHED - reschedule is requested
125 * HARDIRQ - inside an interrupt handler
126 * SOFTIRQ - inside a softirq handler
128 enum trace_flag_type {
129 TRACE_FLAG_IRQS_OFF = 0x01,
130 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
131 TRACE_FLAG_NEED_RESCHED = 0x04,
132 TRACE_FLAG_HARDIRQ = 0x08,
133 TRACE_FLAG_SOFTIRQ = 0x10,
134 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
135 TRACE_FLAG_NMI = 0x40,
138 #define TRACE_BUF_SIZE 1024
143 * The CPU trace array - it consists of thousands of trace entries
144 * plus some other descriptor data (for example, which task started
147 struct trace_array_cpu {
149 void *buffer_page; /* ring buffer spare */
151 unsigned long entries;
152 unsigned long saved_latency;
153 unsigned long critical_start;
154 unsigned long critical_end;
155 unsigned long critical_sequence;
157 unsigned long policy;
158 unsigned long rt_priority;
159 unsigned long skipped_entries;
160 cycle_t preempt_timestamp;
163 char comm[TASK_COMM_LEN];
166 #ifdef CONFIG_FUNCTION_TRACER
167 bool ftrace_ignore_pid;
172 struct trace_option_dentry;
174 struct trace_buffer {
175 struct trace_array *tr;
176 struct ring_buffer *buffer;
177 struct trace_array_cpu __percpu *data;
182 #define TRACE_FLAGS_MAX_SIZE 32
184 struct trace_options {
185 struct tracer *tracer;
186 struct trace_option_dentry *topts;
189 struct trace_pid_list {
195 * The trace array - an array of per-CPU trace arrays. This is the
196 * highest level data structure that individual tracers deal with.
197 * Each trace array has on/off state as well:
200 struct list_head list;
202 struct trace_buffer trace_buffer;
203 #ifdef CONFIG_TRACER_MAX_TRACE
205 * The max_buffer is used to snapshot the trace when a maximum
206 * latency is reached, or when the user initiates a snapshot.
207 * Some tracers will use this to store a maximum trace while
208 * it continues examining live traces.
210 * The buffers for the max_buffer are set up the same as the trace_buffer.
211 * When a snapshot is taken, the buffer of the max_buffer is swapped
212 * with the buffer of the trace_buffer and the buffers are reset for
213 * the trace_buffer so the tracing can continue.
215 struct trace_buffer max_buffer;
216 bool allocated_snapshot;
218 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
219 unsigned long max_latency;
221 struct trace_pid_list __rcu *filtered_pids;
223 * max_lock is used to protect the swapping of buffers
224 * when taking a max snapshot. The buffers themselves are
225 * protected by per_cpu spinlocks. But the action of the swap
226 * needs its own lock.
228 * This is defined as an arch_spinlock_t in order to help
229 * with performance when lockdep debugging is enabled.
231 * It is also used in other places outside of update_max_tr(),
232 * so it needs to be defined outside of
233 * CONFIG_TRACER_MAX_TRACE.
235 arch_spinlock_t max_lock;
237 #ifdef CONFIG_FTRACE_SYSCALLS
238 int sys_refcount_enter;
239 int sys_refcount_exit;
240 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
241 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
246 struct tracer *current_trace;
247 unsigned int trace_flags;
248 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
250 raw_spinlock_t start_lock;
252 struct dentry *options;
253 struct dentry *percpu_dir;
254 struct dentry *event_dir;
255 struct trace_options *topts;
256 struct list_head systems;
257 struct list_head events;
258 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
260 #ifdef CONFIG_FUNCTION_TRACER
261 struct ftrace_ops *ops;
262 struct trace_pid_list __rcu *function_pids;
263 /* function tracing enabled */
264 int function_enabled;
269 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
272 extern struct list_head ftrace_trace_arrays;
274 extern struct mutex trace_types_lock;
276 extern int trace_array_get(struct trace_array *tr);
277 extern void trace_array_put(struct trace_array *tr);
280 * The global tracer (top) should be the first trace array added,
281 * but we check the flag anyway.
283 static inline struct trace_array *top_trace_array(void)
285 struct trace_array *tr;
287 if (list_empty(&ftrace_trace_arrays))
290 tr = list_entry(ftrace_trace_arrays.prev,
292 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
296 #define FTRACE_CMP_TYPE(var, type) \
297 __builtin_types_compatible_p(typeof(var), type *)
300 #define IF_ASSIGN(var, entry, etype, id) \
301 if (FTRACE_CMP_TYPE(var, etype)) { \
302 var = (typeof(var))(entry); \
303 WARN_ON(id && (entry)->type != id); \
307 /* Will cause compile errors if type is not found. */
308 extern void __ftrace_bad_type(void);
311 * The trace_assign_type is a verifier that the entry type is
312 * the same as the type being assigned. To add new types simply
313 * add a line with the following format:
315 * IF_ASSIGN(var, ent, type, id);
317 * Where "type" is the trace type that includes the trace_entry
318 * as the "ent" item. And "id" is the trace identifier that is
319 * used in the trace_type enum.
321 * If the type can have more than one id, then use zero.
323 #define trace_assign_type(var, ent) \
325 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
326 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
327 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
328 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
329 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
330 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
331 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
332 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
333 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
335 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
337 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
338 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
340 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
342 __ftrace_bad_type(); \
346 * An option specific to a tracer. This is a boolean value.
347 * The bit is the bit mask that sets this option's value in the
348 * flags value in struct tracer_flags.
351 const char *name; /* Will appear on the trace_options file */
352 u32 bit; /* Mask assigned in val field in tracer_flags */
356 * The set of specific options for a tracer. Your tracer
357 * has to set the initial value of the flags val.
359 struct tracer_flags {
361 struct tracer_opt *opts;
362 struct tracer *trace;
365 /* Makes it easier to define a tracer opt */
366 #define TRACER_OPT(s, b) .name = #s, .bit = b
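/*
 * Illustrative sketch only (the my_* names are hypothetical): a tracer
 * typically builds its private options like this and points its
 * struct tracer ->flags at the tracer_flags instance:
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(my_verbose, 0x1) },
 *		{ }	(terminating empty entry)
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val  = 0,	(initial value of the flags)
 *		.opts = my_tracer_opts,
 *	};
 */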
369 struct trace_option_dentry {
370 struct tracer_opt *opt;
371 struct tracer_flags *flags;
372 struct trace_array *tr;
373 struct dentry *entry;
377 * struct tracer - a specific tracer and its callbacks to interact with tracefs
378 * @name: the name chosen to select it on the available_tracers file
379 * @init: called when one switches to this tracer (echo name > current_tracer)
380 * @reset: called when one switches to another tracer
381 * @start: called when tracing is unpaused (echo 1 > tracing_on)
382 * @stop: called when tracing is paused (echo 0 > tracing_on)
383 * @update_thresh: called when tracing_thresh is updated
384 * @open: called when the trace file is opened
385 * @pipe_open: called when the trace_pipe file is opened
386 * @close: called when the trace file is released
387 * @pipe_close: called when the trace_pipe file is released
388 * @read: override the default read callback on trace_pipe
389 * @splice_read: override the default splice_read callback on trace_pipe
390 * @selftest: selftest to run on boot (see trace_selftest.c)
391 * @print_headers: override the first lines that describe your columns
392 * @print_line: callback that prints a trace
393 * @set_flag: signals one of your private flags changed (trace_options file)
394 * @flags: your private flags
398 int (*init)(struct trace_array *tr);
399 void (*reset)(struct trace_array *tr);
400 void (*start)(struct trace_array *tr);
401 void (*stop)(struct trace_array *tr);
402 int (*update_thresh)(struct trace_array *tr);
403 void (*open)(struct trace_iterator *iter);
404 void (*pipe_open)(struct trace_iterator *iter);
405 void (*close)(struct trace_iterator *iter);
406 void (*pipe_close)(struct trace_iterator *iter);
407 ssize_t (*read)(struct trace_iterator *iter,
408 struct file *filp, char __user *ubuf,
409 size_t cnt, loff_t *ppos);
410 ssize_t (*splice_read)(struct trace_iterator *iter,
413 struct pipe_inode_info *pipe,
416 #ifdef CONFIG_FTRACE_STARTUP_TEST
417 int (*selftest)(struct tracer *trace,
418 struct trace_array *tr);
420 void (*print_header)(struct seq_file *m);
421 enum print_line_t (*print_line)(struct trace_iterator *iter);
422 /* If you handled the flag setting, return 0 */
423 int (*set_flag)(struct trace_array *tr,
424 u32 old_flags, u32 bit, int set);
425 /* Return 0 if OK with change, else return non-zero */
426 int (*flag_changed)(struct trace_array *tr,
429 struct tracer_flags *flags;
433 bool allow_instances;
434 #ifdef CONFIG_TRACER_MAX_TRACE
440 /* Only current can touch trace_recursion */
443 * For function tracing recursion:
444 * The order of these bits is important.
446 * When function tracing occurs, the following steps are taken:
447 * If the arch does not support an ftrace feature:
448 * call the internal function (uses INTERNAL bits) which calls...
449 * If the callback is registered to the "global" list, the list
450 * function is called and recursion checks the GLOBAL bits.
451 * then this function calls...
452 * The function callback, which can use the FTRACE bits to
453 * check for recursion.
455 * Now if the arch does not support a feature, and it calls
456 * the global list function which calls the ftrace callback
457 * all three of these steps will do a recursion protection.
458 * There's no reason to do one if the previous caller already
459 * did. The recursion that we are protecting against will
460 * go through the same steps again.
462 * To prevent multiple recursion checks, if a recursion
463 * bit is set that is higher than the MAX bit of the current
464 * check, then we know that the check was made by the previous
465 * caller, and we can skip the current check.
469 TRACE_BUFFER_NMI_BIT,
470 TRACE_BUFFER_IRQ_BIT,
471 TRACE_BUFFER_SIRQ_BIT,
473 /* Start of function recursion bits */
475 TRACE_FTRACE_NMI_BIT,
476 TRACE_FTRACE_IRQ_BIT,
477 TRACE_FTRACE_SIRQ_BIT,
479 /* INTERNAL_BITs must be greater than FTRACE_BITs */
481 TRACE_INTERNAL_NMI_BIT,
482 TRACE_INTERNAL_IRQ_BIT,
483 TRACE_INTERNAL_SIRQ_BIT,
487 * Abuse of the trace_recursion.
488 * We need a way to maintain state if we are tracing the function
489 * graph in irq context, because we want to trace a particular function
490 * that was called in irq context while irq tracing is off. Since this
491 * can only be modified by current, we can reuse trace_recursion.
496 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
497 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
498 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
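/*
 * A hedged example of the set/test/clear helpers above, mirroring how
 * the function-graph code (see ftrace_graph_addr() below) remembers
 * that a filtered function was hit in irq context:
 *
 *	if (in_irq())
 *		trace_recursion_set(TRACE_IRQ_BIT);
 *	else
 *		trace_recursion_clear(TRACE_IRQ_BIT);
 *	...
 *	if (trace_recursion_test(TRACE_IRQ_BIT))
 *		(we are still inside the irq context of interest)
 */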
500 #define TRACE_CONTEXT_BITS 4
502 #define TRACE_FTRACE_START TRACE_FTRACE_BIT
503 #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
505 #define TRACE_LIST_START TRACE_INTERNAL_BIT
506 #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
508 #define TRACE_CONTEXT_MASK TRACE_LIST_MAX
510 static __always_inline int trace_get_context_bit(void)
514 if (in_interrupt()) {
528 static __always_inline int trace_test_and_set_recursion(int start, int max)
530 unsigned int val = current->trace_recursion;
533 /* A previous recursion check was made */
534 if ((val & TRACE_CONTEXT_MASK) > max)
537 bit = trace_get_context_bit() + start;
538 if (unlikely(val & (1 << bit)))
542 current->trace_recursion = val;
548 static __always_inline void trace_clear_recursion(int bit)
550 unsigned int val = current->trace_recursion;
559 current->trace_recursion = val;
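/*
 * A hedged sketch of how a function callback pairs the two helpers
 * above (the real callers live in the ftrace core and the tracers):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		(recursion detected, bail out)
 *
 *	(do the actual tracing work here)
 *
 *	trace_clear_recursion(bit);
 */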
562 static inline struct ring_buffer_iter *
563 trace_buffer_iter(struct trace_iterator *iter, int cpu)
565 if (iter->buffer_iter && iter->buffer_iter[cpu])
566 return iter->buffer_iter[cpu];
570 int tracer_init(struct tracer *t, struct trace_array *tr);
571 int tracing_is_enabled(void);
572 void tracing_reset(struct trace_buffer *buf, int cpu);
573 void tracing_reset_online_cpus(struct trace_buffer *buf);
574 void tracing_reset_current(int cpu);
575 void tracing_reset_all_online_cpus(void);
576 int tracing_open_generic(struct inode *inode, struct file *filp);
577 bool tracing_is_disabled(void);
578 int tracer_tracing_is_on(struct trace_array *tr);
579 struct dentry *trace_create_file(const char *name,
581 struct dentry *parent,
583 const struct file_operations *fops);
585 struct dentry *tracing_init_dentry(void);
587 struct ring_buffer_event;
589 struct ring_buffer_event *
590 trace_buffer_lock_reserve(struct ring_buffer *buffer,
596 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
597 struct trace_array_cpu *data);
599 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
600 int *ent_cpu, u64 *ent_ts);
602 void __buffer_unlock_commit(struct ring_buffer *buffer,
603 struct ring_buffer_event *event);
605 int trace_empty(struct trace_iterator *iter);
607 void *trace_find_next_entry_inc(struct trace_iterator *iter);
609 void trace_init_global_iter(struct trace_iterator *iter);
611 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
613 void trace_function(struct trace_array *tr,
615 unsigned long parent_ip,
616 unsigned long flags, int pc);
617 void trace_graph_function(struct trace_array *tr,
619 unsigned long parent_ip,
620 unsigned long flags, int pc);
621 void trace_latency_header(struct seq_file *m);
622 void trace_default_header(struct seq_file *m);
623 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
626 void trace_graph_return(struct ftrace_graph_ret *trace);
627 int trace_graph_entry(struct ftrace_graph_ent *trace);
628 void set_graph_array(struct trace_array *tr);
630 void tracing_start_cmdline_record(void);
631 void tracing_stop_cmdline_record(void);
632 int register_tracer(struct tracer *type);
633 int is_tracing_stopped(void);
635 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
637 extern cpumask_var_t __read_mostly tracing_buffer_mask;
639 #define for_each_tracing_cpu(cpu) \
640 for_each_cpu(cpu, tracing_buffer_mask)
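/*
 * Illustrative use only: walk the CPUs in tracing_buffer_mask, e.g. to
 * sum the per-cpu entry counts of a trace_buffer (total and buf are
 * hypothetical locals):
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		total += ring_buffer_entries_cpu(buf->buffer, cpu);
 */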
642 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
644 extern unsigned long tracing_thresh;
650 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
652 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
653 struct task_struct *task);
654 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
655 struct task_struct *self,
656 struct task_struct *task);
657 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
658 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
659 int trace_pid_show(struct seq_file *m, void *v);
660 void trace_free_pid_list(struct trace_pid_list *pid_list);
661 int trace_pid_write(struct trace_pid_list *filtered_pids,
662 struct trace_pid_list **new_pid_list,
663 const char __user *ubuf, size_t cnt);
665 #ifdef CONFIG_TRACER_MAX_TRACE
666 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
667 void update_max_tr_single(struct trace_array *tr,
668 struct task_struct *tsk, int cpu);
669 #endif /* CONFIG_TRACER_MAX_TRACE */
671 #ifdef CONFIG_STACKTRACE
672 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
675 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
678 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
679 unsigned long flags, int pc)
683 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
687 #endif /* CONFIG_STACKTRACE */
689 extern cycle_t ftrace_now(int cpu);
691 extern void trace_find_cmdline(int pid, char comm[]);
692 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
694 #ifdef CONFIG_DYNAMIC_FTRACE
695 extern unsigned long ftrace_update_tot_cnt;
697 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
698 extern int DYN_FTRACE_TEST_NAME(void);
699 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
700 extern int DYN_FTRACE_TEST_NAME2(void);
702 extern bool ring_buffer_expanded;
703 extern bool tracing_selftest_disabled;
705 #ifdef CONFIG_FTRACE_STARTUP_TEST
706 extern int trace_selftest_startup_function(struct tracer *trace,
707 struct trace_array *tr);
708 extern int trace_selftest_startup_function_graph(struct tracer *trace,
709 struct trace_array *tr);
710 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
711 struct trace_array *tr);
712 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
713 struct trace_array *tr);
714 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
715 struct trace_array *tr);
716 extern int trace_selftest_startup_wakeup(struct tracer *trace,
717 struct trace_array *tr);
718 extern int trace_selftest_startup_nop(struct tracer *trace,
719 struct trace_array *tr);
720 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
721 struct trace_array *tr);
722 extern int trace_selftest_startup_branch(struct tracer *trace,
723 struct trace_array *tr);
725 * Tracer data references selftest functions that only occur
726 * on boot up. These can be __init functions. Thus, when selftests
727 * are enabled, the tracers need to reference __init functions.
729 #define __tracer_data __refdata
731 /* Tracers are seldom changed. Optimize when selftests are disabled. */
732 #define __tracer_data __read_mostly
733 #endif /* CONFIG_FTRACE_STARTUP_TEST */
735 extern void *head_page(struct trace_array_cpu *data);
736 extern unsigned long long ns2usecs(cycle_t nsec);
738 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
740 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
742 trace_array_vprintk(struct trace_array *tr,
743 unsigned long ip, const char *fmt, va_list args);
744 int trace_array_printk(struct trace_array *tr,
745 unsigned long ip, const char *fmt, ...);
746 int trace_array_printk_buf(struct ring_buffer *buffer,
747 unsigned long ip, const char *fmt, ...);
748 void trace_printk_seq(struct trace_seq *s);
749 enum print_line_t print_trace_line(struct trace_iterator *iter);
751 extern char trace_find_mark(unsigned long long duration);
753 /* Standard output formatting function used for function return traces */
754 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
757 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
758 #define TRACE_GRAPH_PRINT_CPU 0x2
759 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
760 #define TRACE_GRAPH_PRINT_PROC 0x8
761 #define TRACE_GRAPH_PRINT_DURATION 0x10
762 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
763 #define TRACE_GRAPH_PRINT_IRQS 0x40
764 #define TRACE_GRAPH_PRINT_TAIL 0x80
765 #define TRACE_GRAPH_SLEEP_TIME 0x100
766 #define TRACE_GRAPH_GRAPH_TIME 0x200
767 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
768 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
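/*
 * Illustrative only: a graph-based tracer ORs a subset of the above
 * into the flags it passes to print_graph_function_flags(), e.g.
 * TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION (the exact
 * combination is tracer-specific).
 */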
770 extern void ftrace_graph_sleep_time_control(bool enable);
771 extern void ftrace_graph_graph_time_control(bool enable);
773 extern enum print_line_t
774 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
775 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
777 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
778 extern void graph_trace_open(struct trace_iterator *iter);
779 extern void graph_trace_close(struct trace_iterator *iter);
780 extern int __trace_graph_entry(struct trace_array *tr,
781 struct ftrace_graph_ent *trace,
782 unsigned long flags, int pc);
783 extern void __trace_graph_return(struct trace_array *tr,
784 struct ftrace_graph_ret *trace,
785 unsigned long flags, int pc);
788 #ifdef CONFIG_DYNAMIC_FTRACE
789 /* TODO: make this variable */
790 #define FTRACE_GRAPH_MAX_FUNCS 32
791 extern int ftrace_graph_count;
792 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
793 extern int ftrace_graph_notrace_count;
794 extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
796 static inline int ftrace_graph_addr(unsigned long addr)
800 if (!ftrace_graph_count)
803 for (i = 0; i < ftrace_graph_count; i++) {
804 if (addr == ftrace_graph_funcs[i]) {
806 * If no irqs are to be traced, but a set_graph_function
807 * is set, and called by an interrupt handler, we still
811 trace_recursion_set(TRACE_IRQ_BIT);
813 trace_recursion_clear(TRACE_IRQ_BIT);
821 static inline int ftrace_graph_notrace_addr(unsigned long addr)
825 if (!ftrace_graph_notrace_count)
828 for (i = 0; i < ftrace_graph_notrace_count; i++) {
829 if (addr == ftrace_graph_notrace_funcs[i])
836 static inline int ftrace_graph_addr(unsigned long addr)
841 static inline int ftrace_graph_notrace_addr(unsigned long addr)
845 #endif /* CONFIG_DYNAMIC_FTRACE */
846 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
847 static inline enum print_line_t
848 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
850 return TRACE_TYPE_UNHANDLED;
852 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
854 extern struct list_head ftrace_pids;
856 #ifdef CONFIG_FUNCTION_TRACER
857 extern bool ftrace_filter_param __initdata;
858 static inline int ftrace_trace_task(struct trace_array *tr)
860 return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
862 extern int ftrace_is_dead(void);
863 int ftrace_create_function_files(struct trace_array *tr,
864 struct dentry *parent);
865 void ftrace_destroy_function_files(struct trace_array *tr);
866 void ftrace_init_global_array_ops(struct trace_array *tr);
867 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
868 void ftrace_reset_array_ops(struct trace_array *tr);
869 int using_ftrace_ops_list_func(void);
870 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
871 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
872 struct dentry *d_tracer);
873 void ftrace_clear_pids(struct trace_array *tr);
875 static inline int ftrace_trace_task(struct trace_array *tr)
879 static inline int ftrace_is_dead(void) { return 0; }
881 ftrace_create_function_files(struct trace_array *tr,
882 struct dentry *parent)
886 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
887 static inline __init void
888 ftrace_init_global_array_ops(struct trace_array *tr) { }
889 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
890 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
891 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
892 static inline void ftrace_clear_pids(struct trace_array *tr) { }
893 /* ftrace_func_t type is not defined, use macro instead of static inline */
894 #define ftrace_init_array_ops(tr, func) do { } while (0)
895 #endif /* CONFIG_FUNCTION_TRACER */
897 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
898 void ftrace_create_filter_files(struct ftrace_ops *ops,
899 struct dentry *parent);
900 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
903 * The ops parameter passed in is usually undefined.
904 * This must be a macro.
906 #define ftrace_create_filter_files(ops, parent) do { } while (0)
907 #define ftrace_destroy_filter_files(ops) do { } while (0)
908 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
910 bool ftrace_event_is_function(struct trace_event_call *call);
913 * struct trace_parser - serves for reading the user input separated by spaces
914 * @cont: set if the input is not complete - no final space char was found
915 * @buffer: holds the parsed user input
916 * @idx: user input length
919 struct trace_parser {
926 static inline bool trace_parser_loaded(struct trace_parser *parser)
928 return (parser->idx != 0);
931 static inline bool trace_parser_cont(struct trace_parser *parser)
936 static inline void trace_parser_clear(struct trace_parser *parser)
938 parser->cont = false;
942 extern int trace_parser_get_init(struct trace_parser *parser, int size);
943 extern void trace_parser_put(struct trace_parser *parser);
944 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
945 size_t cnt, loff_t *ppos);
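/*
 * A hedged sketch of the usual trace_parser flow inside a write()
 * handler (the surrounding function is hypothetical; the pid and
 * filter files follow this pattern):
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		(act on the space-separated token in parser.buffer)
 *
 *	trace_parser_put(&parser);
 *	return read;
 */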
948 * Only create function graph options if function graph is configured.
950 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
951 # define FGRAPH_FLAGS \
952 C(DISPLAY_GRAPH, "display-graph"),
954 # define FGRAPH_FLAGS
957 #ifdef CONFIG_BRANCH_TRACER
958 # define BRANCH_FLAGS \
961 # define BRANCH_FLAGS
964 #ifdef CONFIG_FUNCTION_TRACER
965 # define FUNCTION_FLAGS \
966 C(FUNCTION, "function-trace"),
967 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
969 # define FUNCTION_FLAGS
970 # define FUNCTION_DEFAULT_FLAGS 0UL
973 #ifdef CONFIG_STACKTRACE
974 # define STACK_FLAGS \
975 C(STACKTRACE, "stacktrace"),
981 * trace_iterator_flags is an enumeration that defines bit
982 * positions into trace_flags that control the output.
984 * NOTE: These bits must match the trace_options array in
985 * trace.c (this macro guarantees it).
987 #define TRACE_FLAGS \
988 C(PRINT_PARENT, "print-parent"), \
989 C(SYM_OFFSET, "sym-offset"), \
990 C(SYM_ADDR, "sym-addr"), \
991 C(VERBOSE, "verbose"), \
996 C(PRINTK, "trace_printk"), \
997 C(ANNOTATE, "annotate"), \
998 C(USERSTACKTRACE, "userstacktrace"), \
999 C(SYM_USEROBJ, "sym-userobj"), \
1000 C(PRINTK_MSGONLY, "printk-msg-only"), \
1001 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1002 C(LATENCY_FMT, "latency-format"), \
1003 C(RECORD_CMD, "record-cmd"), \
1004 C(OVERWRITE, "overwrite"), \
1005 C(STOP_ON_FREE, "disable_on_free"), \
1006 C(IRQ_INFO, "irq-info"), \
1007 C(MARKERS, "markers"), \
1008 C(EVENT_FORK, "event-fork"), \
1015 * By defining C, we can make TRACE_FLAGS a list of bit names
1016 * that will define the bits for the flag masks.
1019 #define C(a, b) TRACE_ITER_##a##_BIT
1021 enum trace_iterator_bits {
1023 /* Make sure we don't go more than we have bits for */
1028 * By redefining C, we can make TRACE_FLAGS a list of masks that
1029 * use the bits as defined above.
1032 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1034 enum trace_iterator_flags { TRACE_FLAGS };
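/*
 * For illustration, with the two C() definitions above, an entry such
 * as C(VERBOSE, "verbose") yields TRACE_ITER_VERBOSE_BIT in
 * enum trace_iterator_bits and
 * TRACE_ITER_VERBOSE = (1 << TRACE_ITER_VERBOSE_BIT) in
 * enum trace_iterator_flags, while the string "verbose" becomes the
 * name shown in the trace_options file.
 */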
1037 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1038 * control the output of kernel symbols.
1040 #define TRACE_ITER_SYM_MASK \
1041 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1043 extern struct tracer nop_trace;
1045 #ifdef CONFIG_BRANCH_TRACER
1046 extern int enable_branch_tracing(struct trace_array *tr);
1047 extern void disable_branch_tracing(void);
1048 static inline int trace_branch_enable(struct trace_array *tr)
1050 if (tr->trace_flags & TRACE_ITER_BRANCH)
1051 return enable_branch_tracing(tr);
1054 static inline void trace_branch_disable(void)
1056 /* due to races, always disable */
1057 disable_branch_tracing();
1060 static inline int trace_branch_enable(struct trace_array *tr)
1064 static inline void trace_branch_disable(void)
1067 #endif /* CONFIG_BRANCH_TRACER */
1069 /* set ring buffers to default size if not already done */
1070 int tracing_update_buffers(void);
1072 struct ftrace_event_field {
1073 struct list_head link;
1082 struct event_filter {
1083 int n_preds; /* Number assigned */
1084 int a_preds; /* allocated */
1085 struct filter_pred *preds;
1086 struct filter_pred *root;
1087 char *filter_string;
1090 struct event_subsystem {
1091 struct list_head list;
1093 struct event_filter *filter;
1097 struct trace_subsystem_dir {
1098 struct list_head list;
1099 struct event_subsystem *subsystem;
1100 struct trace_array *tr;
1101 struct dentry *entry;
1106 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1107 struct ring_buffer *buffer,
1108 struct ring_buffer_event *event);
1110 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1111 struct ring_buffer *buffer,
1112 struct ring_buffer_event *event,
1113 unsigned long flags, int pc,
1114 struct pt_regs *regs);
1116 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1117 struct ring_buffer *buffer,
1118 struct ring_buffer_event *event,
1119 unsigned long flags, int pc)
1121 trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1124 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1125 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1126 void trace_buffered_event_disable(void);
1127 void trace_buffered_event_enable(void);
1130 __trace_event_discard_commit(struct ring_buffer *buffer,
1131 struct ring_buffer_event *event)
1133 if (this_cpu_read(trace_buffered_event) == event) {
1134 /* Simply release the temp buffer */
1135 this_cpu_dec(trace_buffered_event_cnt);
1138 ring_buffer_discard_commit(buffer, event);
1142 * Helper function for event_trigger_unlock_commit{_regs}().
1143 * If there are event triggers attached to this event that require
1144 * filtering against its fields, then they will be called as the
1145 * entry already holds the field information of the current event.
1147 * It also checks if the event should be discarded or not.
1148 * It is to be discarded if the event is soft disabled and the
1149 * event was only recorded to process triggers, or if the event
1150 * filter is active and this event did not match the filters.
1152 * Returns true if the event is discarded, false otherwise.
1155 __event_trigger_test_discard(struct trace_event_file *file,
1156 struct ring_buffer *buffer,
1157 struct ring_buffer_event *event,
1159 enum event_trigger_type *tt)
1161 unsigned long eflags = file->flags;
1163 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1164 *tt = event_triggers_call(file, entry);
1166 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1167 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1168 !filter_match_preds(file->filter, entry))) {
1169 __trace_event_discard_commit(buffer, event);
1177 * event_trigger_unlock_commit - handle triggers and finish event commit
1178 * @file: The file pointer associated with the event
1179 * @buffer: The ring buffer that the event is being written to
1180 * @event: The event meta data in the ring buffer
1181 * @entry: The event itself
1182 * @irq_flags: The state of the interrupts at the start of the event
1183 * @pc: The state of the preempt count at the start of the event.
1185 * This is a helper function to handle triggers that require data
1186 * from the event itself. It also tests the event against the filters
1187 * and checks whether the event is soft disabled and should be discarded.
1190 event_trigger_unlock_commit(struct trace_event_file *file,
1191 struct ring_buffer *buffer,
1192 struct ring_buffer_event *event,
1193 void *entry, unsigned long irq_flags, int pc)
1195 enum event_trigger_type tt = ETT_NONE;
1197 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1198 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1201 event_triggers_post_call(file, tt, entry);
1205 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1206 * @file: The file pointer associated with the event
1207 * @buffer: The ring buffer that the event is being written to
1208 * @event: The event meta data in the ring buffer
1209 * @entry: The event itself
1210 * @irq_flags: The state of the interrupts at the start of the event
1211 * @pc: The state of the preempt count at the start of the event.
1213 * This is a helper function to handle triggers that require data
1214 * from the event itself. It also tests the event against the filters
1215 * and checks whether the event is soft disabled and should be discarded.
1217 * Same as event_trigger_unlock_commit() but calls
1218 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1221 event_trigger_unlock_commit_regs(struct trace_event_file *file,
1222 struct ring_buffer *buffer,
1223 struct ring_buffer_event *event,
1224 void *entry, unsigned long irq_flags, int pc,
1225 struct pt_regs *regs)
1227 enum event_trigger_type tt = ETT_NONE;
1229 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1230 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1231 irq_flags, pc, regs);
1234 event_triggers_post_call(file, tt, entry);
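/*
 * A hedged sketch of the usual sequence around the two helpers above,
 * as seen from a tracepoint probe (names of the locals are
 * illustrative; the generated probes in include/trace/trace_events.h
 * are the authoritative version):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file, type,
 *						sizeof(*entry), irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	(fill in the entry fields)
 *
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 */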
1237 #define FILTER_PRED_INVALID ((unsigned short)-1)
1238 #define FILTER_PRED_IS_RIGHT (1 << 15)
1239 #define FILTER_PRED_FOLD (1 << 15)
1242 * The maximum number of preds is bounded by the size of an unsigned short, with
1243 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1244 * and FOLD flags. The other is reserved.
1246 * 2^14 preds is way more than enough.
1248 #define MAX_FILTER_PRED 16384
1253 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1255 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1265 char pattern[MAX_FILTER_STR_VAL];
1268 regex_match_func match;
1271 struct filter_pred {
1272 filter_pred_fn_t fn;
1275 unsigned short *ops;
1276 struct ftrace_event_field *field;
1280 unsigned short index;
1281 unsigned short parent;
1282 unsigned short left;
1283 unsigned short right;
1286 static inline bool is_string_field(struct ftrace_event_field *field)
1288 return field->filter_type == FILTER_DYN_STRING ||
1289 field->filter_type == FILTER_STATIC_STRING ||
1290 field->filter_type == FILTER_PTR_STRING;
1293 static inline bool is_function_field(struct ftrace_event_field *field)
1295 return field->filter_type == FILTER_TRACE_FN;
1298 extern enum regex_type
1299 filter_parse_regex(char *buff, int len, char **search, int *not);
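/*
 * Hedged usage sketch of filter_parse_regex() (buff is modified in place):
 *
 *	char *search;
 *	int not;
 *	enum regex_type type;
 *
 *	type = filter_parse_regex(buff, strlen(buff), &search, &not);
 *	(then match against "search" according to "type" and invert the
 *	 result if "not" is set)
 */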
1300 extern void print_event_filter(struct trace_event_file *file,
1301 struct trace_seq *s);
1302 extern int apply_event_filter(struct trace_event_file *file,
1303 char *filter_string);
1304 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1305 char *filter_string);
1306 extern void print_subsystem_event_filter(struct event_subsystem *system,
1307 struct trace_seq *s);
1308 extern int filter_assign_type(const char *type);
1309 extern int create_event_filter(struct trace_event_call *call,
1310 char *filter_str, bool set_str,
1311 struct event_filter **filterp);
1312 extern void free_event_filter(struct event_filter *filter);
1314 struct ftrace_event_field *
1315 trace_find_event_field(struct trace_event_call *call, char *name);
1317 extern void trace_event_enable_cmd_record(bool enable);
1318 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1319 extern int event_trace_del_tracer(struct trace_array *tr);
1321 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1325 static inline void *event_file_data(struct file *filp)
1327 return ACCESS_ONCE(file_inode(filp)->i_private);
1330 extern struct mutex event_mutex;
1331 extern struct list_head ftrace_events;
1333 extern const struct file_operations event_trigger_fops;
1334 extern const struct file_operations event_hist_fops;
1336 #ifdef CONFIG_HIST_TRIGGERS
1337 extern int register_trigger_hist_cmd(void);
1338 extern int register_trigger_hist_enable_disable_cmds(void);
1340 static inline int register_trigger_hist_cmd(void) { return 0; }
1341 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1344 extern int register_trigger_cmds(void);
1345 extern void clear_event_triggers(struct trace_array *tr);
1347 struct event_trigger_data {
1348 unsigned long count;
1350 struct event_trigger_ops *ops;
1351 struct event_command *cmd_ops;
1352 struct event_filter __rcu *filter;
1357 struct list_head list;
1359 struct list_head named_list;
1360 struct event_trigger_data *named_data;
1364 #define ENABLE_EVENT_STR "enable_event"
1365 #define DISABLE_EVENT_STR "disable_event"
1366 #define ENABLE_HIST_STR "enable_hist"
1367 #define DISABLE_HIST_STR "disable_hist"
1369 struct enable_trigger_data {
1370 struct trace_event_file *file;
1375 extern int event_enable_trigger_print(struct seq_file *m,
1376 struct event_trigger_ops *ops,
1377 struct event_trigger_data *data);
1378 extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1379 struct event_trigger_data *data);
1380 extern int event_enable_trigger_func(struct event_command *cmd_ops,
1381 struct trace_event_file *file,
1382 char *glob, char *cmd, char *param);
1383 extern int event_enable_register_trigger(char *glob,
1384 struct event_trigger_ops *ops,
1385 struct event_trigger_data *data,
1386 struct trace_event_file *file);
1387 extern void event_enable_unregister_trigger(char *glob,
1388 struct event_trigger_ops *ops,
1389 struct event_trigger_data *test,
1390 struct trace_event_file *file);
1391 extern void trigger_data_free(struct event_trigger_data *data);
1392 extern int event_trigger_init(struct event_trigger_ops *ops,
1393 struct event_trigger_data *data);
1394 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1395 int trigger_enable);
1396 extern void update_cond_flag(struct trace_event_file *file);
1397 extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
1398 struct event_trigger_data *test,
1399 struct trace_event_file *file);
1400 extern int set_trigger_filter(char *filter_str,
1401 struct event_trigger_data *trigger_data,
1402 struct trace_event_file *file);
1403 extern struct event_trigger_data *find_named_trigger(const char *name);
1404 extern bool is_named_trigger(struct event_trigger_data *test);
1405 extern int save_named_trigger(const char *name,
1406 struct event_trigger_data *data);
1407 extern void del_named_trigger(struct event_trigger_data *data);
1408 extern void pause_named_trigger(struct event_trigger_data *data);
1409 extern void unpause_named_trigger(struct event_trigger_data *data);
1410 extern void set_named_trigger_data(struct event_trigger_data *data,
1411 struct event_trigger_data *named_data);
1412 extern int register_event_command(struct event_command *cmd);
1413 extern int unregister_event_command(struct event_command *cmd);
1414 extern int register_trigger_hist_enable_disable_cmds(void);
1417 * struct event_trigger_ops - callbacks for trace event triggers
1419 * The methods in this structure provide per-event trigger hooks for
1420 * various trigger operations.
1422 * All the methods below, except for @init() and @free(), must be
1425 * @func: The trigger 'probe' function called when the triggering
1426 * event occurs. The data passed into this callback is the data
1427 * that was supplied to the event_command @reg() function that
1428 * registered the trigger (see struct event_command) along with
1429 * the trace record, rec.
1431 * @init: An optional initialization function called for the trigger
1432 * when the trigger is registered (via the event_command reg()
1433 * function). This can be used to perform per-trigger
1434 * initialization such as incrementing a per-trigger reference
1435 * count, for instance. This is usually implemented by the
1436 * generic utility function @event_trigger_init() (see
1437 * trace_event_triggers.c).
1439 * @free: An optional de-initialization function called for the
1440 * trigger when the trigger is unregistered (via the
1441 * event_command @reg() function). This can be used to perform
1442 * per-trigger de-initialization such as decrementing a
1443 * per-trigger reference count and freeing corresponding trigger
1444 * data, for instance. This is usually implemented by the
1445 * generic utility function @event_trigger_free() (see
1446 * trace_event_triggers.c).
1448 * @print: The callback function invoked to have the trigger print
1449 * itself. This is usually implemented by a wrapper function
1450 * that calls the generic utility function @event_trigger_print()
1451 * (see trace_event_triggers.c).
1453 struct event_trigger_ops {
1454 void (*func)(struct event_trigger_data *data,
1456 int (*init)(struct event_trigger_ops *ops,
1457 struct event_trigger_data *data);
1458 void (*free)(struct event_trigger_ops *ops,
1459 struct event_trigger_data *data);
1460 int (*print)(struct seq_file *m,
1461 struct event_trigger_ops *ops,
1462 struct event_trigger_data *data);
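/*
 * Illustrative only (the my_* names are hypothetical): a trigger
 * implementation typically provides a static ops instance, e.g.
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.func	= my_trigger_func,
 *		.print	= my_trigger_print,
 *		.init	= event_trigger_init,	(generic helper declared above)
 *		.free	= my_trigger_free,
 *	};
 *
 * which the owning command returns from its get_trigger_ops() callback.
 */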
1466 * struct event_command - callbacks and data members for event commands
1468 * Event commands are invoked by users by writing the command name
1469 * into the 'trigger' file associated with a trace event. The
1470 * parameters associated with a specific invocation of an event
1471 * command are used to create an event trigger instance, which is
1472 * added to the list of trigger instances associated with that trace
1473 * event. When the event is hit, the set of triggers associated with
1474 * that event is invoked.
1476 * The data members in this structure provide per-event command data
1477 * for various event commands.
1479 * All the data members below, except for @post_trigger, must be set
1480 * for each event command.
1482 * @name: The unique name that identifies the event command. This is
1483 * the name used when setting triggers via trigger files.
1485 * @trigger_type: A unique id that identifies the event command
1486 * 'type'. This value has two purposes, the first to ensure that
1487 * only one trigger of the same type can be set at a given time
1488 * for a particular event e.g. it doesn't make sense to have both
1489 * a traceon and traceoff trigger attached to a single event at
1490 * the same time, so traceon and traceoff have the same type
1491 * though they have different names. The @trigger_type value is
1492 * also used as a bit value for deferring the actual trigger
1493 * action until after the current event is finished. Some
1494 * commands need to do this if they themselves log to the trace
1495 * buffer (see the @post_trigger() member below). @trigger_type
1496 * values are defined by adding new values to the trigger_type
1497 * enum in include/linux/trace_events.h.
1499 * @flags: See the enum event_command_flags below.
1501 * All the methods below, except for @set_filter() and @unreg_all(),
1502 * must be implemented.
1504 * @func: The callback function responsible for parsing and
1505 * registering the trigger written to the 'trigger' file by the
1506 * user. It allocates the trigger instance and registers it with
1507 * the appropriate trace event. It makes use of the other
1508 * event_command callback functions to orchestrate this, and is
1509 * usually implemented by the generic utility function
1510 * @event_trigger_callback() (see trace_event_triggers.c).
1512 * @reg: Adds the trigger to the list of triggers associated with the
1513 * event, and enables the event trigger itself, after
1514 * initializing it (via the event_trigger_ops @init() function).
1515 * This is also where commands can use the @trigger_type value to
1516 * make the decision as to whether or not multiple instances of
1517 * the trigger should be allowed. This is usually implemented by
1518 * the generic utility function @register_trigger() (see
1519 * trace_event_triggers.c).
1521 * @unreg: Removes the trigger from the list of triggers associated
1522 * with the event, and disables the event trigger itself, after
1523 * initializing it (via the event_trigger_ops @free() function).
1524 * This is usually implemented by the generic utility function
1525 * @unregister_trigger() (see trace_event_triggers.c).
1527 * @unreg_all: An optional function called to remove all the triggers
1528 * from the list of triggers associated with the event. Called
1529 * when a trigger file is opened in truncate mode.
1531 * @set_filter: An optional function called to parse and set a filter
1532 * for the trigger. If no @set_filter() method is set for the
1533 * event command, filters set by the user for the command will be
1534 * ignored. This is usually implemented by the generic utility
1535 * function @set_trigger_filter() (see trace_event_triggers.c).
1537 * @get_trigger_ops: The callback function invoked to retrieve the
1538 * event_trigger_ops implementation associated with the command.
1540 struct event_command {
1541 struct list_head list;
1543 enum event_trigger_type trigger_type;
1545 int (*func)(struct event_command *cmd_ops,
1546 struct trace_event_file *file,
1547 char *glob, char *cmd, char *params);
1548 int (*reg)(char *glob,
1549 struct event_trigger_ops *ops,
1550 struct event_trigger_data *data,
1551 struct trace_event_file *file);
1552 void (*unreg)(char *glob,
1553 struct event_trigger_ops *ops,
1554 struct event_trigger_data *data,
1555 struct trace_event_file *file);
1556 void (*unreg_all)(struct trace_event_file *file);
1557 int (*set_filter)(char *filter_str,
1558 struct event_trigger_data *data,
1559 struct trace_event_file *file);
1560 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
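/*
 * Hedged sketch only (the my_* names are hypothetical): an event
 * command is a static instance wired up from the helpers declared
 * earlier and handed to register_event_command(), e.g.
 *
 *	static struct event_command my_trigger_cmd = {
 *		.name		 = "mytrigger",
 *		.trigger_type	 = ETT_TRACE_ONOFF,	(reusing an existing type for illustration)
 *		.func		 = my_trigger_parse,
 *		.reg		 = my_register_trigger,
 *		.unreg		 = unregister_trigger,
 *		.set_filter	 = set_trigger_filter,
 *		.get_trigger_ops = my_get_trigger_ops,
 *	};
 */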
1564 * enum event_command_flags - flags for struct event_command
1566 * @POST_TRIGGER: A flag that says whether or not this command needs
1567 * to have its action delayed until after the current event has
1568 * been closed. Some triggers need to avoid being invoked while
1569 * an event is currently in the process of being logged, since
1570 * the trigger may itself log data into the trace buffer. Thus
1571 * we make sure the current event is committed before invoking
1572 * those triggers. To do that, the trigger invocation is split
1573 * in two - the first part checks the filter using the current
1574 * trace record; if a command has the @post_trigger flag set, it
1575 * sets a bit for itself in the return value, otherwise it
1576 * directly invokes the trigger. Once all commands have been
1577 * either invoked or set their return flag, the current record is
1578 * either committed or discarded. At that point, if any commands
1579 * have deferred their triggers, those commands are finally
1580 * invoked following the close of the current event. In other
1581 * words, if the event_trigger_ops @func() probe implementation
1582 * itself logs to the trace buffer, this flag should be set,
1583 * otherwise it can be left unspecified.
1585 * @NEEDS_REC: A flag that says whether or not this command needs
1586 * access to the trace record in order to perform its function,
1587 * regardless of whether or not it has a filter associated with
1588 * it (filters make a trigger require access to the trace record
1589 * but are not always present).
1591 enum event_command_flags {
1592 EVENT_CMD_FL_POST_TRIGGER = 1,
1593 EVENT_CMD_FL_NEEDS_REC = 2,
1596 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1598 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1601 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1603 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1606 extern int trace_event_enable_disable(struct trace_event_file *file,
1607 int enable, int soft_disable);
1608 extern int tracing_alloc_snapshot(void);
1610 extern const char *__start___trace_bprintk_fmt[];
1611 extern const char *__stop___trace_bprintk_fmt[];
1613 extern const char *__start___tracepoint_str[];
1614 extern const char *__stop___tracepoint_str[];
1616 void trace_printk_control(bool enabled);
1617 void trace_printk_init_buffers(void);
1618 void trace_printk_start_comm(void);
1619 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1620 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1623 * Normal trace_printk() and friends allocate special buffers
1624 * to do the manipulation, as well as save the print formats
1625 * into sections to display. But the trace infrastructure wants
1626 * to use these without the added overhead at the price of being
1627 * a bit slower (used mainly for warnings, where we don't care
1628 * about performance). The internal_trace_puts() is for such
1631 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
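/*
 * Example (illustrative): internal_trace_puts("warning: lost events\n")
 * writes the literal string straight into the ring buffer via
 * __trace_puts(), bypassing the trace_printk() bookkeeping described
 * above.
 */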
1634 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1635 extern struct trace_event_call \
1636 __aligned(4) event_##call;
1637 #undef FTRACE_ENTRY_DUP
1638 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1639 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1641 #undef FTRACE_ENTRY_PACKED
1642 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1643 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1646 #include "trace_entries.h"
1648 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1649 int perf_ftrace_event_register(struct trace_event_call *call,
1650 enum trace_reg type, void *data);
1652 #define perf_ftrace_event_register NULL
1655 #ifdef CONFIG_FTRACE_SYSCALLS
1656 void init_ftrace_syscalls(void);
1657 const char *get_syscall_name(int syscall);
1659 static inline void init_ftrace_syscalls(void) { }
1660 static inline const char *get_syscall_name(int syscall)
1666 #ifdef CONFIG_EVENT_TRACING
1667 void trace_event_init(void);
1668 void trace_event_enum_update(struct trace_enum_map **map, int len);
1670 static inline void __init trace_event_init(void) { }
1671 static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
1674 extern struct trace_iterator *tracepoint_print_iter;
1676 #endif /* _LINUX_KERNEL_TRACE_H */