#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};
#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
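
/*
 * Illustrative expansion (a sketch, not part of the original header):
 * with the definitions above, an entry in trace_entries.h such as
 *
 *	FTRACE_ENTRY(function, ftrace_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...)
 *
 * expands to:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */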
/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- reschedule on preemption is requested
 *  NEED_RESCHED_LAZY	- lazy reschedule is requested
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x40,
};
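
/*
 * Illustrative only: output code typically decodes these flags from a
 * saved trace_entry, e.g.:
 *
 *	char irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
 *	int hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 *	int softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 */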
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu *data;
	cycle_t				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	unsigned int			nr_pids;
	int				order;
	pid_t				*pids;
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE block.
	 */
	arch_spinlock_t		max_lock;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};
extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
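
/*
 * Example usage (illustrative): a print callback can safely downcast
 * the generic entry, e.g.:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *			 field->ip, field->parent_ip);
 */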
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
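
/*
 * Example (illustrative, hypothetical names): declaring a private
 * tracer option with TRACER_OPT(); the opts array ends with an empty
 * terminating entry:
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_verbose, MY_OPT_VERBOSE) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val = 0,
 *		.opts = my_opts,
 *	};
 */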
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_headers: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
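
/*
 * Sketch (illustrative, hypothetical names): a minimal tracer fills in
 * a name plus init/reset and is made known to the core with
 * register_tracer(), normally from an __init function:
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	return register_tracer(&my_tracer);
 */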
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq context, because we may want to trace a particular
 * function that was called in irq context while irq tracing is off.
 * Since this can only be modified by current, we can reuse
 * trace_recursion.
 */
	TRACE_IRQ_BIT,
};
#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
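
/*
 * Usage sketch (illustrative): a function callback protecting itself
 * against recursion with the helpers above:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		- recursion detected, bail out
 *
 *	... do the tracing work ...
 *
 *	trace_clear_recursion(bit);
 */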
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;
#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
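
/*
 * Example (illustrative): iterating only the CPUs that are traced:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(buf, cpu);
 */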
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40
#define TRACE_GRAPH_PRINT_TAIL          0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);
/**
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
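
/*
 * Usage sketch (illustrative): a write() handler feeding user input
 * through the parser:
 *
 *	struct trace_parser parser;
 *	ssize_t ret;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		... act on parser.buffer ...
 *
 *	trace_parser_put(&parser);
 */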
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS					\
		C(FUNCTION,		"function-trace"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS
/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
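
/*
 * Illustrative expansion: an entry such as C(VERBOSE, "verbose") first
 * becomes the enumerator TRACE_ITER_VERBOSE_BIT and, after C() is
 * redefined above, the mask
 * TRACE_ITER_VERBOSE = (1 << TRACE_ITER_VERBOSE_BIT).
 * trace.c redefines C(a, b) once more, to "b", to build the array of
 * option names from the same list.
 */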
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);
struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)
/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int 			not;
	int 			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);
struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	struct list_head		list;
};
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs.  The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command).
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function).  This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @reg() function).  This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself.  This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
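
/*
 * Sketch (illustrative, hypothetical names): a minimal trigger wiring
 * the callbacks above, reusing the generic helpers mentioned in the
 * kernel-doc:
 *
 *	static void my_trigger(struct event_trigger_data *data)
 *	{
 *		if (!data->count)
 *			return;
 *		if (data->count != -1)
 *			(data->count)--;
 *		... perform the trigger action ...
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.func	= my_trigger,
 *		.print	= my_trigger_print,
 *		.init	= event_trigger_init,
 *		.free	= event_trigger_free,
 *	};
 */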
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event.  The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event.  When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command.  This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'.  This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names.  The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished.  Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below).  @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @post_trigger: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed.  Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer.  Thus
 *	we make sure the current event is committed before invoking
 *	those triggers.  To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger.  Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded.  At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event.  In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * All the methods below, except for @set_filter(), must be
 * implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user.  It allocates the trigger instance and registers it with
 *	the appropriate trace event.  It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed.  This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger.  If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored.  This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	bool			post_trigger;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
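
/*
 * Sketch (illustrative, hypothetical names): an event command built
 * from the generic helpers named in the kernel-doc above:
 *
 *	static struct event_command my_cmd = {
 *		.name			= "my_trigger",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.set_filter		= set_trigger_filter,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *	};
 */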
extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
#else
static inline void init_ftrace_syscalls(void) { }
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */