2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/sched/rt.h>
46 #include "trace_output.h"
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
52 bool ring_buffer_expanded;
55 * We need to change this state when a selftest is running.
56 * A selftest will look into the ring buffer to count the
57 * entries inserted during the selftest, although some concurrent
58 * insertions into the ring buffer, such as trace_printk(), could occur
59 * at the same time, giving false positive or negative results.
61 static bool __read_mostly tracing_selftest_running;
64 * If a tracer is running, we do not want to run SELFTEST.
66 bool __read_mostly tracing_selftest_disabled;
68 /* Pipe tracepoints to printk */
69 struct trace_iterator *tracepoint_print_iter;
70 int tracepoint_printk;
72 /* For tracers that don't implement custom flags */
73 static struct tracer_opt dummy_tracer_opt[] = {
77 static struct tracer_flags dummy_tracer_flags = {
79 .opts = dummy_tracer_opt
83 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
93 static DEFINE_PER_CPU(bool, trace_cmdline_save);
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
101 static int tracing_disabled = 1;
103 cpumask_var_t __read_mostly tracing_buffer_mask;
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputting them to a
114 * It is off by default, but you can enable it either by specifying
115 * "ftrace_dump_on_oops" on the kernel command line, or by setting
116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set it to 1 if you want to dump the buffers of all CPUs
118 * Set it to 2 if you want to dump the buffer of the CPU that triggered the oops
121 enum ftrace_dump_mode ftrace_dump_on_oops;
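/*
 * Illustrative example (not part of the original file), using only the
 * two knobs named in the comment above and the "orig_cpu" value parsed
 * by set_ftrace_dump_on_oops() below:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * or, on the kernel command line, to dump only the oopsing CPU:
 *
 *	ftrace_dump_on_oops=orig_cpu
 */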
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
126 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
127 /* Map of enums to their values, for "enum_map" file */
128 struct trace_enum_map_head {
130 unsigned long length;
133 union trace_enum_map_item;
135 struct trace_enum_map_tail {
137 * "end" is first and points to NULL as it must be different
138 * than "mod" or "enum_string"
140 union trace_enum_map_item *next;
141 const char *end; /* points to NULL */
144 static DEFINE_MUTEX(trace_enum_mutex);
147 * The trace_enum_maps are saved in an array with two extra elements,
148 * one at the beginning, and one at the end. The beginning item contains
149 * the count of the saved maps (head.length), and the module they
150 * belong to if not built in (head.mod). The ending item contains a
151 * pointer to the next array of saved enum_map items.
153 union trace_enum_map_item {
154 struct trace_enum_map map;
155 struct trace_enum_map_head head;
156 struct trace_enum_map_tail tail;
159 static union trace_enum_map_item *trace_enum_maps;
160 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
162 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
164 #define MAX_TRACER_SIZE 100
165 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
166 static char *default_bootup_tracer;
168 static bool allocate_snapshot;
170 static int __init set_cmdline_ftrace(char *str)
172 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
173 default_bootup_tracer = bootup_tracer_buf;
174 /* We are using ftrace early, expand it */
175 ring_buffer_expanded = true;
178 __setup("ftrace=", set_cmdline_ftrace);
180 static int __init set_ftrace_dump_on_oops(char *str)
182 if (*str++ != '=' || !*str) {
183 ftrace_dump_on_oops = DUMP_ALL;
187 if (!strcmp("orig_cpu", str)) {
188 ftrace_dump_on_oops = DUMP_ORIG;
194 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
196 static int __init stop_trace_on_warning(char *str)
198 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
199 __disable_trace_on_warning = 1;
202 __setup("traceoff_on_warning", stop_trace_on_warning);
204 static int __init boot_alloc_snapshot(char *str)
206 allocate_snapshot = true;
207 /* We also need the main ring buffer expanded */
208 ring_buffer_expanded = true;
211 __setup("alloc_snapshot", boot_alloc_snapshot);
214 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
216 static int __init set_trace_boot_options(char *str)
218 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
221 __setup("trace_options=", set_trace_boot_options);
223 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
224 static char *trace_boot_clock __initdata;
226 static int __init set_trace_boot_clock(char *str)
228 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
229 trace_boot_clock = trace_boot_clock_buf;
232 __setup("trace_clock=", set_trace_boot_clock);
234 static int __init set_tracepoint_printk(char *str)
236 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
237 tracepoint_printk = 1;
240 __setup("tp_printk", set_tracepoint_printk);
242 unsigned long long ns2usecs(cycle_t nsec)
249 /* trace_flags holds trace_options default values */
250 #define TRACE_DEFAULT_FLAGS \
251 (FUNCTION_DEFAULT_FLAGS | \
252 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
253 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
254 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
255 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
257 /* trace_options that are only supported by global_trace */
258 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
259 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
263 * The global_trace is the descriptor that holds the tracing
264 * buffers for the live tracing. For each CPU, it contains
265 * a linked list of pages that will store trace entries. The
266 * page descriptor of the pages in memory is used to hold
267 * the linked list by linking the lru item in the page descriptor
268 * to each of the pages in the buffer per CPU.
270 * For each active CPU there is a data field that holds the
271 * pages for the buffer for that CPU. Each CPU has the same number
272 * of pages allocated for its buffer.
274 static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
278 LIST_HEAD(ftrace_trace_arrays);
280 int trace_array_get(struct trace_array *this_tr)
282 struct trace_array *tr;
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
293 mutex_unlock(&trace_types_lock);
298 static void __trace_array_put(struct trace_array *this_tr)
300 WARN_ON(!this_tr->ref);
304 void trace_array_put(struct trace_array *this_tr)
306 mutex_lock(&trace_types_lock);
307 __trace_array_put(this_tr);
308 mutex_unlock(&trace_types_lock);
311 int filter_check_discard(struct trace_event_file *file, void *rec,
312 struct ring_buffer *buffer,
313 struct ring_buffer_event *event)
315 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
316 !filter_match_preds(file->filter, rec)) {
317 ring_buffer_discard_commit(buffer, event);
323 EXPORT_SYMBOL_GPL(filter_check_discard);
325 int call_filter_check_discard(struct trace_event_call *call, void *rec,
326 struct ring_buffer *buffer,
327 struct ring_buffer_event *event)
329 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
330 !filter_match_preds(call->filter, rec)) {
331 ring_buffer_discard_commit(buffer, event);
337 EXPORT_SYMBOL_GPL(call_filter_check_discard);
339 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
343 /* Early boot up does not have a buffer yet */
345 return trace_clock_local();
347 ts = ring_buffer_time_stamp(buf->buffer, cpu);
348 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
353 cycle_t ftrace_now(int cpu)
355 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
359 * tracing_is_enabled - Show if global_trace has been enabled
361 * Shows if the global trace has been enabled or not. It uses the
362 * mirror flag "buffer_disabled" to be used in fast paths such as for
363 * the irqsoff tracer. But it may be inaccurate due to races. If you
364 * need to know the accurate state, use tracing_is_on() which is a little
365 * slower, but accurate.
367 int tracing_is_enabled(void)
370 * For quick access (irqsoff uses this in fast path), just
371 * return the mirror variable of the state of the ring buffer.
372 * It's a little racy, but we don't really care.
375 return !global_trace.buffer_disabled;
379 * trace_buf_size is the size in bytes that is allocated
380 * for a buffer. Note, the number of bytes is always rounded
383 * This number is purposely set to a low number of 16384.
384 * If a dump on oops happens, it is much appreciated not to have
385 * to wait for all that output. Anyway, this is configurable at both
386 * boot time and run time.
388 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
390 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
392 /* trace_types holds a linked list of available tracers. */
393 static struct tracer *trace_types __read_mostly;
396 * trace_types_lock is used to protect the trace_types list.
398 DEFINE_MUTEX(trace_types_lock);
401 * serialize the access of the ring buffer
403 * The ring buffer serializes readers, but that is only low level protection.
404 * The validity of the events (returned by ring_buffer_peek() etc.)
405 * is not protected by the ring buffer.
407 * The content of events may become garbage if we allow other processes to consume
408 * these events concurrently:
409 * A) the page of the consumed events may become a normal page
410 * (not a reader page) in the ring buffer, and this page will be rewritten
411 * by the event producer.
412 * B) The page of the consumed events may become a page for splice_read,
413 * and this page will be returned to the system.
415 * These primitives allow multiple processes to access different per-cpu ring buffers
418 * These primitives don't distinguish read-only from read-consume access.
419 * Multiple read-only accesses are also serialized.
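/*
 * A minimal usage sketch (illustrative, assuming the helpers defined
 * below): a reader of one cpu buffer takes the per-cpu lock, while a
 * reader that walks every buffer passes RING_BUFFER_ALL_CPUS and takes
 * the write side of the rwsem instead:
 *
 *	trace_access_lock(cpu);
 *	... consume events from that cpu's buffer ...
 *	trace_access_unlock(cpu);
 */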
423 static DECLARE_RWSEM(all_cpu_access_lock);
424 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
426 static inline void trace_access_lock(int cpu)
428 if (cpu == RING_BUFFER_ALL_CPUS) {
429 /* gain it for accessing the whole ring buffer. */
430 down_write(&all_cpu_access_lock);
432 /* gain it for accessing a cpu ring buffer. */
434 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
435 down_read(&all_cpu_access_lock);
437 /* Secondly block other access to this @cpu ring buffer. */
438 mutex_lock(&per_cpu(cpu_access_lock, cpu));
442 static inline void trace_access_unlock(int cpu)
444 if (cpu == RING_BUFFER_ALL_CPUS) {
445 up_write(&all_cpu_access_lock);
447 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
448 up_read(&all_cpu_access_lock);
452 static inline void trace_access_lock_init(void)
456 for_each_possible_cpu(cpu)
457 mutex_init(&per_cpu(cpu_access_lock, cpu));
462 static DEFINE_MUTEX(access_lock);
464 static inline void trace_access_lock(int cpu)
467 mutex_lock(&access_lock);
470 static inline void trace_access_unlock(int cpu)
473 mutex_unlock(&access_lock);
476 static inline void trace_access_lock_init(void)
482 #ifdef CONFIG_STACKTRACE
483 static void __ftrace_trace_stack(struct ring_buffer *buffer,
485 int skip, int pc, struct pt_regs *regs);
486 static inline void ftrace_trace_stack(struct trace_array *tr,
487 struct ring_buffer *buffer,
489 int skip, int pc, struct pt_regs *regs);
492 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
494 int skip, int pc, struct pt_regs *regs)
497 static inline void ftrace_trace_stack(struct trace_array *tr,
498 struct ring_buffer *buffer,
500 int skip, int pc, struct pt_regs *regs)
506 static void tracer_tracing_on(struct trace_array *tr)
508 if (tr->trace_buffer.buffer)
509 ring_buffer_record_on(tr->trace_buffer.buffer);
511 * This flag is looked at when buffers haven't been allocated
512 * yet, or by some tracers (like irqsoff) that just want to
513 * know if the ring buffer has been disabled, but it can handle
514 * races where it gets disabled but we still do a record.
515 * As the check is in the fast path of the tracers, it is more
516 * important to be fast than accurate.
518 tr->buffer_disabled = 0;
519 /* Make the flag seen by readers */
524 * tracing_on - enable tracing buffers
526 * This function enables tracing buffers that may have been
527 * disabled with tracing_off.
529 void tracing_on(void)
531 tracer_tracing_on(&global_trace);
533 EXPORT_SYMBOL_GPL(tracing_on);
536 * __trace_puts - write a constant string into the trace buffer.
537 * @ip: The address of the caller
538 * @str: The constant string to write
539 * @size: The size of the string.
541 int __trace_puts(unsigned long ip, const char *str, int size)
543 struct ring_buffer_event *event;
544 struct ring_buffer *buffer;
545 struct print_entry *entry;
546 unsigned long irq_flags;
550 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
553 pc = preempt_count();
555 if (unlikely(tracing_selftest_running || tracing_disabled))
558 alloc = sizeof(*entry) + size + 2; /* possible \n added */
560 local_save_flags(irq_flags);
561 buffer = global_trace.trace_buffer.buffer;
562 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
567 entry = ring_buffer_event_data(event);
570 memcpy(&entry->buf, str, size);
572 /* Add a newline if necessary */
573 if (entry->buf[size - 1] != '\n') {
574 entry->buf[size] = '\n';
575 entry->buf[size + 1] = '\0';
577 entry->buf[size] = '\0';
579 __buffer_unlock_commit(buffer, event);
580 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
584 EXPORT_SYMBOL_GPL(__trace_puts);
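/*
 * Example (illustrative, not part of this file): __trace_puts() is
 * normally reached through the trace_puts() wrapper macro in the
 * tracing headers, which supplies the caller's ip and the string
 * length, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */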
587 * __trace_bputs - write the pointer to a constant string into trace buffer
588 * @ip: The address of the caller
589 * @str: The constant string to write to the buffer
591 int __trace_bputs(unsigned long ip, const char *str)
593 struct ring_buffer_event *event;
594 struct ring_buffer *buffer;
595 struct bputs_entry *entry;
596 unsigned long irq_flags;
597 int size = sizeof(struct bputs_entry);
600 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
603 pc = preempt_count();
605 if (unlikely(tracing_selftest_running || tracing_disabled))
608 local_save_flags(irq_flags);
609 buffer = global_trace.trace_buffer.buffer;
610 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
615 entry = ring_buffer_event_data(event);
619 __buffer_unlock_commit(buffer, event);
620 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
624 EXPORT_SYMBOL_GPL(__trace_bputs);
626 #ifdef CONFIG_TRACER_SNAPSHOT
628 * tracing_snapshot - take a snapshot of the current buffer.
630 * This causes a swap between the snapshot buffer and the current live
631 * tracing buffer. You can use this to take snapshots of the live
632 * trace when some condition is triggered, but continue to trace.
634 * Note, make sure to allocate the snapshot either with
635 * tracing_snapshot_alloc(), or by doing it manually
636 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
638 * If the snapshot buffer is not allocated, it will stop tracing.
639 * Basically making a permanent snapshot.
641 void tracing_snapshot(void)
643 struct trace_array *tr = &global_trace;
644 struct tracer *tracer = tr->current_trace;
648 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
649 internal_trace_puts("*** snapshot is being ignored ***\n");
653 if (!tr->allocated_snapshot) {
654 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
655 internal_trace_puts("*** stopping trace here! ***\n");
660 /* Note, the snapshot cannot be used while the tracer is using it */
661 if (tracer->use_max_tr) {
662 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
663 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
667 local_irq_save(flags);
668 update_max_tr(tr, current, smp_processor_id());
669 local_irq_restore(flags);
671 EXPORT_SYMBOL_GPL(tracing_snapshot);
673 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
674 struct trace_buffer *size_buf, int cpu_id);
675 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
677 static int alloc_snapshot(struct trace_array *tr)
681 if (!tr->allocated_snapshot) {
683 /* allocate spare buffer */
684 ret = resize_buffer_duplicate_size(&tr->max_buffer,
685 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
689 tr->allocated_snapshot = true;
695 static void free_snapshot(struct trace_array *tr)
698 * We don't free the ring buffer; instead, we resize it because
699 * the max_tr ring buffer has some state (e.g. ring->clock) and
700 * we want to preserve it.
702 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
703 set_buffer_entries(&tr->max_buffer, 1);
704 tracing_reset_online_cpus(&tr->max_buffer);
705 tr->allocated_snapshot = false;
709 * tracing_alloc_snapshot - allocate snapshot buffer.
711 * This only allocates the snapshot buffer if it isn't already
712 * allocated - it doesn't also take a snapshot.
714 * This is meant to be used in cases where the snapshot buffer needs
715 * to be set up for events that can't sleep but need to be able to
716 * trigger a snapshot.
718 int tracing_alloc_snapshot(void)
720 struct trace_array *tr = &global_trace;
723 ret = alloc_snapshot(tr);
728 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
731 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
733 * This is similar to trace_snapshot(), but it will allocate the
734 * snapshot buffer if it isn't already allocated. Use this only
735 * where it is safe to sleep, as the allocation may sleep.
737 * This causes a swap between the snapshot buffer and the current live
738 * tracing buffer. You can use this to take snapshots of the live
739 * trace when some condition is triggered, but continue to trace.
741 void tracing_snapshot_alloc(void)
745 ret = tracing_alloc_snapshot();
751 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
753 void tracing_snapshot(void)
755 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
757 EXPORT_SYMBOL_GPL(tracing_snapshot);
758 int tracing_alloc_snapshot(void)
760 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
763 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
764 void tracing_snapshot_alloc(void)
769 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
770 #endif /* CONFIG_TRACER_SNAPSHOT */
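/*
 * Usage sketch (illustrative only): a caller that may sleep can make
 * sure the spare buffer exists up front, and a later, possibly atomic,
 * path can then grab snapshots when its condition of interest fires:
 *
 *	tracing_alloc_snapshot();	(may sleep; allocates the spare buffer)
 *	...
 *	tracing_snapshot();		(spare buffer must already be allocated)
 */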
772 static void tracer_tracing_off(struct trace_array *tr)
774 if (tr->trace_buffer.buffer)
775 ring_buffer_record_off(tr->trace_buffer.buffer);
777 * This flag is looked at when buffers haven't been allocated
778 * yet, or by some tracers (like irqsoff) that just want to
779 * know if the ring buffer has been disabled, but it can handle
780 * races where it gets disabled but we still do a record.
781 * As the check is in the fast path of the tracers, it is more
782 * important to be fast than accurate.
784 tr->buffer_disabled = 1;
785 /* Make the flag seen by readers */
790 * tracing_off - turn off tracing buffers
792 * This function stops the tracing buffers from recording data.
793 * It does not disable any overhead the tracers themselves may
794 * be causing. This function simply causes all recording to
795 * the ring buffers to fail.
797 void tracing_off(void)
799 tracer_tracing_off(&global_trace);
801 EXPORT_SYMBOL_GPL(tracing_off);
803 void disable_trace_on_warning(void)
805 if (__disable_trace_on_warning)
810 * tracer_tracing_is_on - show real state of ring buffer enabled
811 * @tr: the trace array to know if ring buffer is enabled
813 * Shows real state of the ring buffer if it is enabled or not.
815 static int tracer_tracing_is_on(struct trace_array *tr)
817 if (tr->trace_buffer.buffer)
818 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
819 return !tr->buffer_disabled;
823 * tracing_is_on - show state of ring buffers enabled
825 int tracing_is_on(void)
827 return tracer_tracing_is_on(&global_trace);
829 EXPORT_SYMBOL_GPL(tracing_is_on);
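/*
 * Example (illustrative, not part of the original file): a debugging
 * hook that freezes the buffers the moment a suspect condition is
 * seen, so the events leading up to it stay in the trace.
 * "my_dev_is_wedged" and "dev" are hypothetical:
 *
 *	if (my_dev_is_wedged(dev) && tracing_is_on())
 *		tracing_off();
 */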
831 static int __init set_buf_size(char *str)
833 unsigned long buf_size;
837 buf_size = memparse(str, &str);
838 /* nr_entries can not be zero */
841 trace_buf_size = buf_size;
844 __setup("trace_buf_size=", set_buf_size);
846 static int __init set_tracing_thresh(char *str)
848 unsigned long threshold;
853 ret = kstrtoul(str, 0, &threshold);
856 tracing_thresh = threshold * 1000;
859 __setup("tracing_thresh=", set_tracing_thresh);
861 unsigned long nsecs_to_usecs(unsigned long nsecs)
867 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
868 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
869 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
870 * of strings in the order that the enums were defined.
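/*
 * Illustrative sketch of that C(a, b) trick (the real TRACE_FLAGS list
 * lives in trace.h; the entries shown are only examples):
 *
 *	#undef C
 *	#define C(a, b) b
 *
 * so a list such as C(PRINT_PARENT, "print-parent"), C(SYM_OFFSET, "sym-offset")
 * expands to the strings "print-parent", "sym-offset", ... in enum order.
 */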
875 /* These must match the bit positions in trace_iterator_flags */
876 static const char *trace_options[] = {
884 int in_ns; /* is this clock in nanoseconds? */
886 { trace_clock_local, "local", 1 },
887 { trace_clock_global, "global", 1 },
888 { trace_clock_counter, "counter", 0 },
889 { trace_clock_jiffies, "uptime", 0 },
890 { trace_clock, "perf", 1 },
891 { ktime_get_mono_fast_ns, "mono", 1 },
892 { ktime_get_raw_fast_ns, "mono_raw", 1 },
897 * trace_parser_get_init - gets the buffer for trace parser
899 int trace_parser_get_init(struct trace_parser *parser, int size)
901 memset(parser, 0, sizeof(*parser));
903 parser->buffer = kmalloc(size, GFP_KERNEL);
912 * trace_parser_put - frees the buffer for trace parser
914 void trace_parser_put(struct trace_parser *parser)
916 kfree(parser->buffer);
920 * trace_get_user - reads the user input string separated by space
921 * (matched by isspace(ch))
923 * For each string found the 'struct trace_parser' is updated,
924 * and the function returns.
926 * Returns number of bytes read.
928 * See kernel/trace/trace.h for 'struct trace_parser' details.
930 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
931 size_t cnt, loff_t *ppos)
938 trace_parser_clear(parser);
940 ret = get_user(ch, ubuf++);
948 * The parser is not finished with the last write,
949 * continue reading the user input without skipping spaces.
952 /* skip white space */
953 while (cnt && isspace(ch)) {
954 ret = get_user(ch, ubuf++);
961 /* only spaces were written */
971 /* read the non-space input */
972 while (cnt && !isspace(ch)) {
973 if (parser->idx < parser->size - 1)
974 parser->buffer[parser->idx++] = ch;
979 ret = get_user(ch, ubuf++);
986 /* We either got finished input or we have to wait for another call. */
988 parser->buffer[parser->idx] = 0;
989 parser->cont = false;
990 } else if (parser->idx < parser->size - 1) {
992 parser->buffer[parser->idx++] = ch;
1005 /* TODO add a seq_buf_to_buffer() */
1006 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1010 if (trace_seq_used(s) <= s->seq.readpos)
1013 len = trace_seq_used(s) - s->seq.readpos;
1016 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1018 s->seq.readpos += cnt;
1022 unsigned long __read_mostly tracing_thresh;
1024 #ifdef CONFIG_TRACER_MAX_TRACE
1026 * Copy the new maximum trace into the separate maximum-trace
1027 * structure. (this way the maximum trace is permanently saved,
1028 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1031 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1033 struct trace_buffer *trace_buf = &tr->trace_buffer;
1034 struct trace_buffer *max_buf = &tr->max_buffer;
1035 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1036 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1039 max_buf->time_start = data->preempt_timestamp;
1041 max_data->saved_latency = tr->max_latency;
1042 max_data->critical_start = data->critical_start;
1043 max_data->critical_end = data->critical_end;
1045 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1046 max_data->pid = tsk->pid;
1048 * If tsk == current, then use current_uid(), as that does not use
1049 * RCU. The irq tracer can be called out of RCU scope.
1052 max_data->uid = current_uid();
1054 max_data->uid = task_uid(tsk);
1056 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1057 max_data->policy = tsk->policy;
1058 max_data->rt_priority = tsk->rt_priority;
1060 /* record this tasks comm */
1061 tracing_record_cmdline(tsk);
1065 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1067 * @tsk: the task with the latency
1068 * @cpu: The cpu that initiated the trace.
1070 * Flip the buffers between the @tr and the max_tr and record information
1071 * about which task was the cause of this latency.
1074 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1076 struct ring_buffer *buf;
1081 WARN_ON_ONCE(!irqs_disabled());
1083 if (!tr->allocated_snapshot) {
1084 /* Only the nop tracer should hit this when disabling */
1085 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1089 arch_spin_lock(&tr->max_lock);
1091 buf = tr->trace_buffer.buffer;
1092 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1093 tr->max_buffer.buffer = buf;
1095 __update_max_tr(tr, tsk, cpu);
1096 arch_spin_unlock(&tr->max_lock);
1100 * update_max_tr_single - only copy one trace over, and reset the rest
1102 * @tsk: task with the latency
1103 * @cpu: the cpu of the buffer to copy.
1105 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1108 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1115 WARN_ON_ONCE(!irqs_disabled());
1116 if (!tr->allocated_snapshot) {
1117 /* Only the nop tracer should hit this when disabling */
1118 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1122 arch_spin_lock(&tr->max_lock);
1124 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1126 if (ret == -EBUSY) {
1128 * We failed to swap the buffer due to a commit taking
1129 * place on this CPU. We fail to record, but we reset
1130 * the max trace buffer (no one writes directly to it)
1131 * and flag that it failed.
1133 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1134 "Failed to swap buffers due to commit in progress\n");
1137 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1139 __update_max_tr(tr, tsk, cpu);
1140 arch_spin_unlock(&tr->max_lock);
1142 #endif /* CONFIG_TRACER_MAX_TRACE */
1144 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1146 /* Iterators are static, they should be filled or empty */
1147 if (trace_buffer_iter(iter, iter->cpu_file))
1150 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1154 #ifdef CONFIG_FTRACE_STARTUP_TEST
1155 static int run_tracer_selftest(struct tracer *type)
1157 struct trace_array *tr = &global_trace;
1158 struct tracer *saved_tracer = tr->current_trace;
1161 if (!type->selftest || tracing_selftest_disabled)
1165 * Run a selftest on this tracer.
1166 * Here we reset the trace buffer, and set the current
1167 * tracer to be this tracer. The tracer can then run some
1168 * internal tracing to verify that everything is in order.
1169 * If we fail, we do not register this tracer.
1171 tracing_reset_online_cpus(&tr->trace_buffer);
1173 tr->current_trace = type;
1175 #ifdef CONFIG_TRACER_MAX_TRACE
1176 if (type->use_max_tr) {
1177 /* If we expanded the buffers, make sure the max is expanded too */
1178 if (ring_buffer_expanded)
1179 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1180 RING_BUFFER_ALL_CPUS);
1181 tr->allocated_snapshot = true;
1185 /* the test is responsible for initializing and enabling */
1186 pr_info("Testing tracer %s: ", type->name);
1187 ret = type->selftest(type, tr);
1188 /* the test is responsible for resetting too */
1189 tr->current_trace = saved_tracer;
1191 printk(KERN_CONT "FAILED!\n");
1192 /* Add the warning after printing 'FAILED' */
1196 /* Only reset on passing, to avoid touching corrupted buffers */
1197 tracing_reset_online_cpus(&tr->trace_buffer);
1199 #ifdef CONFIG_TRACER_MAX_TRACE
1200 if (type->use_max_tr) {
1201 tr->allocated_snapshot = false;
1203 /* Shrink the max buffer again */
1204 if (ring_buffer_expanded)
1205 ring_buffer_resize(tr->max_buffer.buffer, 1,
1206 RING_BUFFER_ALL_CPUS);
1210 printk(KERN_CONT "PASSED\n");
1214 static inline int run_tracer_selftest(struct tracer *type)
1218 #endif /* CONFIG_FTRACE_STARTUP_TEST */
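/*
 * A minimal sketch (illustrative; field names follow struct tracer in
 * trace.h) of what register_tracer() below expects from a plugin.
 * "example_trace_init" and "example_trace_reset" are hypothetical
 * callbacks:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	static int __init init_example_trace(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 */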
1220 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1222 static void __init apply_trace_boot_options(void);
1225 * register_tracer - register a tracer with the ftrace system.
1226 * @type: the plugin for the tracer
1228 * Register a new plugin tracer.
1230 int __init register_tracer(struct tracer *type)
1236 pr_info("Tracer must have a name\n");
1240 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1241 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1245 mutex_lock(&trace_types_lock);
1247 tracing_selftest_running = true;
1249 for (t = trace_types; t; t = t->next) {
1250 if (strcmp(type->name, t->name) == 0) {
1252 pr_info("Tracer %s already registered\n",
1259 if (!type->set_flag)
1260 type->set_flag = &dummy_set_flag;
1262 type->flags = &dummy_tracer_flags;
1264 if (!type->flags->opts)
1265 type->flags->opts = dummy_tracer_opt;
1267 ret = run_tracer_selftest(type);
1271 type->next = trace_types;
1273 add_tracer_options(&global_trace, type);
1276 tracing_selftest_running = false;
1277 mutex_unlock(&trace_types_lock);
1279 if (ret || !default_bootup_tracer)
1282 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1285 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1286 /* Do we want this tracer to start on bootup? */
1287 tracing_set_tracer(&global_trace, type->name);
1288 default_bootup_tracer = NULL;
1290 apply_trace_boot_options();
1292 /* disable other selftests, since this will break it. */
1293 tracing_selftest_disabled = true;
1294 #ifdef CONFIG_FTRACE_STARTUP_TEST
1295 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1303 void tracing_reset(struct trace_buffer *buf, int cpu)
1305 struct ring_buffer *buffer = buf->buffer;
1310 ring_buffer_record_disable(buffer);
1312 /* Make sure all commits have finished */
1313 synchronize_sched();
1314 ring_buffer_reset_cpu(buffer, cpu);
1316 ring_buffer_record_enable(buffer);
1319 void tracing_reset_online_cpus(struct trace_buffer *buf)
1321 struct ring_buffer *buffer = buf->buffer;
1327 ring_buffer_record_disable(buffer);
1329 /* Make sure all commits have finished */
1330 synchronize_sched();
1332 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1334 for_each_online_cpu(cpu)
1335 ring_buffer_reset_cpu(buffer, cpu);
1337 ring_buffer_record_enable(buffer);
1340 /* Must have trace_types_lock held */
1341 void tracing_reset_all_online_cpus(void)
1343 struct trace_array *tr;
1345 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1346 tracing_reset_online_cpus(&tr->trace_buffer);
1347 #ifdef CONFIG_TRACER_MAX_TRACE
1348 tracing_reset_online_cpus(&tr->max_buffer);
1353 #define SAVED_CMDLINES_DEFAULT 128
1354 #define NO_CMDLINE_MAP UINT_MAX
1355 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1356 struct saved_cmdlines_buffer {
1357 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1358 unsigned *map_cmdline_to_pid;
1359 unsigned cmdline_num;
1361 char *saved_cmdlines;
1363 static struct saved_cmdlines_buffer *savedcmd;
1365 /* temporarily disable recording */
1366 static atomic_t trace_record_cmdline_disabled __read_mostly;
1368 static inline char *get_saved_cmdlines(int idx)
1370 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1373 static inline void set_cmdline(int idx, const char *cmdline)
1375 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1378 static int allocate_cmdlines_buffer(unsigned int val,
1379 struct saved_cmdlines_buffer *s)
1381 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1383 if (!s->map_cmdline_to_pid)
1386 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1387 if (!s->saved_cmdlines) {
1388 kfree(s->map_cmdline_to_pid);
1393 s->cmdline_num = val;
1394 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1395 sizeof(s->map_pid_to_cmdline));
1396 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1397 val * sizeof(*s->map_cmdline_to_pid));
1402 static int trace_create_savedcmd(void)
1406 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1410 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1420 int is_tracing_stopped(void)
1422 return global_trace.stop_count;
1426 * tracing_start - quick start of the tracer
1428 * If tracing is enabled but was stopped by tracing_stop,
1429 * this will start the tracer back up.
1431 void tracing_start(void)
1433 struct ring_buffer *buffer;
1434 unsigned long flags;
1436 if (tracing_disabled)
1439 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1440 if (--global_trace.stop_count) {
1441 if (global_trace.stop_count < 0) {
1442 /* Someone screwed up their debugging */
1444 global_trace.stop_count = 0;
1449 /* Prevent the buffers from switching */
1450 arch_spin_lock(&global_trace.max_lock);
1452 buffer = global_trace.trace_buffer.buffer;
1454 ring_buffer_record_enable(buffer);
1456 #ifdef CONFIG_TRACER_MAX_TRACE
1457 buffer = global_trace.max_buffer.buffer;
1459 ring_buffer_record_enable(buffer);
1462 arch_spin_unlock(&global_trace.max_lock);
1465 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1468 static void tracing_start_tr(struct trace_array *tr)
1470 struct ring_buffer *buffer;
1471 unsigned long flags;
1473 if (tracing_disabled)
1476 /* If global, we need to also start the max tracer */
1477 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1478 return tracing_start();
1480 raw_spin_lock_irqsave(&tr->start_lock, flags);
1482 if (--tr->stop_count) {
1483 if (tr->stop_count < 0) {
1484 /* Someone screwed up their debugging */
1491 buffer = tr->trace_buffer.buffer;
1493 ring_buffer_record_enable(buffer);
1496 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1500 * tracing_stop - quick stop of the tracer
1502 * Lightweight way to stop tracing. Use in conjunction with
1505 void tracing_stop(void)
1507 struct ring_buffer *buffer;
1508 unsigned long flags;
1510 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1511 if (global_trace.stop_count++)
1514 /* Prevent the buffers from switching */
1515 arch_spin_lock(&global_trace.max_lock);
1517 buffer = global_trace.trace_buffer.buffer;
1519 ring_buffer_record_disable(buffer);
1521 #ifdef CONFIG_TRACER_MAX_TRACE
1522 buffer = global_trace.max_buffer.buffer;
1524 ring_buffer_record_disable(buffer);
1527 arch_spin_unlock(&global_trace.max_lock);
1530 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1533 static void tracing_stop_tr(struct trace_array *tr)
1535 struct ring_buffer *buffer;
1536 unsigned long flags;
1538 /* If global, we need to also stop the max tracer */
1539 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1540 return tracing_stop();
1542 raw_spin_lock_irqsave(&tr->start_lock, flags);
1543 if (tr->stop_count++)
1546 buffer = tr->trace_buffer.buffer;
1548 ring_buffer_record_disable(buffer);
1551 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1554 void trace_stop_cmdline_recording(void);
1556 static int trace_save_cmdline(struct task_struct *tsk)
1560 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1564 * It's not the end of the world if we don't get
1565 * the lock, but we also don't want to spin
1566 * nor do we want to disable interrupts,
1567 * so if we miss here, then better luck next time.
1569 if (!arch_spin_trylock(&trace_cmdline_lock))
1572 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1573 if (idx == NO_CMDLINE_MAP) {
1574 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1577 * Check whether the cmdline buffer at idx has a pid
1578 * mapped. We are going to overwrite that entry so we
1579 * need to clear the map_pid_to_cmdline. Otherwise we
1580 * would read the new comm for the old pid.
1582 pid = savedcmd->map_cmdline_to_pid[idx];
1583 if (pid != NO_CMDLINE_MAP)
1584 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1586 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1587 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1589 savedcmd->cmdline_idx = idx;
1592 set_cmdline(idx, tsk->comm);
1594 arch_spin_unlock(&trace_cmdline_lock);
1599 static void __trace_find_cmdline(int pid, char comm[])
1604 strcpy(comm, "<idle>");
1608 if (WARN_ON_ONCE(pid < 0)) {
1609 strcpy(comm, "<XXX>");
1613 if (pid > PID_MAX_DEFAULT) {
1614 strcpy(comm, "<...>");
1618 map = savedcmd->map_pid_to_cmdline[pid];
1619 if (map != NO_CMDLINE_MAP)
1620 strcpy(comm, get_saved_cmdlines(map));
1622 strcpy(comm, "<...>");
1625 void trace_find_cmdline(int pid, char comm[])
1628 arch_spin_lock(&trace_cmdline_lock);
1630 __trace_find_cmdline(pid, comm);
1632 arch_spin_unlock(&trace_cmdline_lock);
1636 void tracing_record_cmdline(struct task_struct *tsk)
1638 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1641 if (!__this_cpu_read(trace_cmdline_save))
1644 if (trace_save_cmdline(tsk))
1645 __this_cpu_write(trace_cmdline_save, false);
1649 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1652 struct task_struct *tsk = current;
1654 entry->preempt_count = pc & 0xff;
1655 entry->preempt_lazy_count = preempt_lazy_count();
1656 entry->pid = (tsk) ? tsk->pid : 0;
1658 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1659 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1661 TRACE_FLAG_IRQS_NOSUPPORT |
1663 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1664 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1665 (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
1666 (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
1667 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1669 entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
1671 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1673 struct ring_buffer_event *
1674 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1677 unsigned long flags, int pc)
1679 struct ring_buffer_event *event;
1681 event = ring_buffer_lock_reserve(buffer, len);
1682 if (event != NULL) {
1683 struct trace_entry *ent = ring_buffer_event_data(event);
1685 tracing_generic_entry_update(ent, flags, pc);
1693 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1695 __this_cpu_write(trace_cmdline_save, true);
1696 ring_buffer_unlock_commit(buffer, event);
1699 void trace_buffer_unlock_commit(struct trace_array *tr,
1700 struct ring_buffer *buffer,
1701 struct ring_buffer_event *event,
1702 unsigned long flags, int pc)
1704 __buffer_unlock_commit(buffer, event);
1706 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1707 ftrace_trace_userstack(buffer, flags, pc);
1709 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1711 static struct ring_buffer *temp_buffer;
1713 struct ring_buffer_event *
1714 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1715 struct trace_event_file *trace_file,
1716 int type, unsigned long len,
1717 unsigned long flags, int pc)
1719 struct ring_buffer_event *entry;
1721 *current_rb = trace_file->tr->trace_buffer.buffer;
1722 entry = trace_buffer_lock_reserve(*current_rb,
1723 type, len, flags, pc);
1725 * If tracing is off, but we have triggers enabled
1726 * we still need to look at the event data. Use the temp_buffer
1727 * to store the trace event for the trigger to use. It's recursion
1728 * safe and will not be recorded anywhere.
1730 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1731 *current_rb = temp_buffer;
1732 entry = trace_buffer_lock_reserve(*current_rb,
1733 type, len, flags, pc);
1737 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1739 struct ring_buffer_event *
1740 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1741 int type, unsigned long len,
1742 unsigned long flags, int pc)
1744 *current_rb = global_trace.trace_buffer.buffer;
1745 return trace_buffer_lock_reserve(*current_rb,
1746 type, len, flags, pc);
1748 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1750 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1751 struct ring_buffer *buffer,
1752 struct ring_buffer_event *event,
1753 unsigned long flags, int pc,
1754 struct pt_regs *regs)
1756 __buffer_unlock_commit(buffer, event);
1758 ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
1759 ftrace_trace_userstack(buffer, flags, pc);
1761 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1763 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1764 struct ring_buffer_event *event)
1766 ring_buffer_discard_commit(buffer, event);
1768 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
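/*
 * The reserve/fill/commit pattern that the helpers above implement, as
 * used by trace_function() just below (sketch only; the entry layout
 * depends on the event type):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	if (!call_filter_check_discard(call, entry, buffer, event))
 *		__buffer_unlock_commit(buffer, event);
 */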
1771 trace_function(struct trace_array *tr,
1772 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1775 struct trace_event_call *call = &event_function;
1776 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1777 struct ring_buffer_event *event;
1778 struct ftrace_entry *entry;
1780 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1784 entry = ring_buffer_event_data(event);
1786 entry->parent_ip = parent_ip;
1788 if (!call_filter_check_discard(call, entry, buffer, event))
1789 __buffer_unlock_commit(buffer, event);
1792 #ifdef CONFIG_STACKTRACE
1794 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1795 struct ftrace_stack {
1796 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1799 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1800 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1802 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1803 unsigned long flags,
1804 int skip, int pc, struct pt_regs *regs)
1806 struct trace_event_call *call = &event_kernel_stack;
1807 struct ring_buffer_event *event;
1808 struct stack_entry *entry;
1809 struct stack_trace trace;
1811 int size = FTRACE_STACK_ENTRIES;
1813 trace.nr_entries = 0;
1817 * Since events can happen in NMIs there's no safe way to
1818 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1819 * or NMI comes in, it will just have to use the default
1820 * FTRACE_STACK_SIZE.
1822 preempt_disable_notrace();
1824 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1826 * We don't need any atomic variables, just a barrier.
1827 * If an interrupt comes in, we don't care, because it would
1828 * have exited and put the counter back to what we want.
1829 * We just need a barrier to keep gcc from moving things
1833 if (use_stack == 1) {
1834 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1835 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1838 save_stack_trace_regs(regs, &trace);
1840 save_stack_trace(&trace);
1842 if (trace.nr_entries > size)
1843 size = trace.nr_entries;
1845 /* From now on, use_stack is a boolean */
1848 size *= sizeof(unsigned long);
1850 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1851 sizeof(*entry) + size, flags, pc);
1854 entry = ring_buffer_event_data(event);
1856 memset(&entry->caller, 0, size);
1859 memcpy(&entry->caller, trace.entries,
1860 trace.nr_entries * sizeof(unsigned long));
1862 trace.max_entries = FTRACE_STACK_ENTRIES;
1863 trace.entries = entry->caller;
1865 save_stack_trace_regs(regs, &trace);
1867 save_stack_trace(&trace);
1870 entry->size = trace.nr_entries;
1872 if (!call_filter_check_discard(call, entry, buffer, event))
1873 __buffer_unlock_commit(buffer, event);
1876 /* Again, don't let gcc optimize things here */
1878 __this_cpu_dec(ftrace_stack_reserve);
1879 preempt_enable_notrace();
1883 static inline void ftrace_trace_stack(struct trace_array *tr,
1884 struct ring_buffer *buffer,
1885 unsigned long flags,
1886 int skip, int pc, struct pt_regs *regs)
1888 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1891 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1894 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1897 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1901 * trace_dump_stack - record a stack back trace in the trace buffer
1902 * @skip: Number of functions to skip (helper handlers)
1904 void trace_dump_stack(int skip)
1906 unsigned long flags;
1908 if (tracing_disabled || tracing_selftest_running)
1911 local_save_flags(flags);
1914 * Skip 3 more, seems to get us at the caller of
1918 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1919 flags, skip, preempt_count(), NULL);
1922 static DEFINE_PER_CPU(int, user_stack_count);
1925 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1927 struct trace_event_call *call = &event_user_stack;
1928 struct ring_buffer_event *event;
1929 struct userstack_entry *entry;
1930 struct stack_trace trace;
1932 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1936 * NMIs can not handle page faults, even with fixups.
1937 * Saving the user stack can (and often does) fault.
1939 if (unlikely(in_nmi()))
1943 * prevent recursion, since the user stack tracing may
1944 * trigger other kernel events.
1947 if (__this_cpu_read(user_stack_count))
1950 __this_cpu_inc(user_stack_count);
1952 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1953 sizeof(*entry), flags, pc);
1955 goto out_drop_count;
1956 entry = ring_buffer_event_data(event);
1958 entry->tgid = current->tgid;
1959 memset(&entry->caller, 0, sizeof(entry->caller));
1961 trace.nr_entries = 0;
1962 trace.max_entries = FTRACE_STACK_ENTRIES;
1964 trace.entries = entry->caller;
1966 save_stack_trace_user(&trace);
1967 if (!call_filter_check_discard(call, entry, buffer, event))
1968 __buffer_unlock_commit(buffer, event);
1971 __this_cpu_dec(user_stack_count);
1977 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1979 ftrace_trace_userstack(tr, flags, preempt_count());
1983 #endif /* CONFIG_STACKTRACE */
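/*
 * Example (illustrative): trace_dump_stack() above can be dropped into
 * a suspect code path to record how it was reached, e.g.:
 *
 *	trace_dump_stack(0);
 *
 * with @skip raised if wrapper functions should be left out of the dump.
 */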
1985 /* created for use with alloc_percpu */
1986 struct trace_buffer_struct {
1987 char buffer[TRACE_BUF_SIZE];
1990 static struct trace_buffer_struct *trace_percpu_buffer;
1991 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1992 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1993 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996 * The buffer used is dependent on the context. There is a per cpu
1997 * buffer for normal context, softirq context, hard irq context and
1998 * for NMI context. This allows for lockless recording.
2000 * Note, if the buffers failed to be allocated, then this returns NULL
2002 static char *get_trace_buf(void)
2004 struct trace_buffer_struct *percpu_buffer;
2007 * If we have allocated per cpu buffers, then we do not
2008 * need to do any locking.
2011 percpu_buffer = trace_percpu_nmi_buffer;
2013 percpu_buffer = trace_percpu_irq_buffer;
2014 else if (in_softirq())
2015 percpu_buffer = trace_percpu_sirq_buffer;
2017 percpu_buffer = trace_percpu_buffer;
2022 return this_cpu_ptr(&percpu_buffer->buffer[0]);
2025 static int alloc_percpu_trace_buffer(void)
2027 struct trace_buffer_struct *buffers;
2028 struct trace_buffer_struct *sirq_buffers;
2029 struct trace_buffer_struct *irq_buffers;
2030 struct trace_buffer_struct *nmi_buffers;
2032 buffers = alloc_percpu(struct trace_buffer_struct);
2036 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2040 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2044 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2048 trace_percpu_buffer = buffers;
2049 trace_percpu_sirq_buffer = sirq_buffers;
2050 trace_percpu_irq_buffer = irq_buffers;
2051 trace_percpu_nmi_buffer = nmi_buffers;
2056 free_percpu(irq_buffers);
2058 free_percpu(sirq_buffers);
2060 free_percpu(buffers);
2062 WARN(1, "Could not allocate percpu trace_printk buffer");
2066 static int buffers_allocated;
2068 void trace_printk_init_buffers(void)
2070 if (buffers_allocated)
2073 if (alloc_percpu_trace_buffer())
2076 /* trace_printk() is for debug use only. Don't use it in production. */
2079 pr_warning("**********************************************************\n");
2080 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2081 pr_warning("** **\n");
2082 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2083 pr_warning("** **\n");
2084 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2085 pr_warning("** unsafe for production use. **\n");
2086 pr_warning("** **\n");
2087 pr_warning("** If you see this message and you are not debugging **\n");
2088 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2089 pr_warning("** **\n");
2090 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2091 pr_warning("**********************************************************\n");
2093 /* Expand the buffers to set size */
2094 tracing_update_buffers();
2096 buffers_allocated = 1;
2099 * trace_printk_init_buffers() can be called by modules.
2100 * If that happens, then we need to start cmdline recording
2101 * directly here. If the global_trace.buffer is already
2102 * allocated here, then this was called by module code.
2104 if (global_trace.trace_buffer.buffer)
2105 tracing_start_cmdline_record();
2108 void trace_printk_start_comm(void)
2110 /* Start tracing comms if trace printk is set */
2111 if (!buffers_allocated)
2113 tracing_start_cmdline_record();
2116 static void trace_printk_start_stop_comm(int enabled)
2118 if (!buffers_allocated)
2122 tracing_start_cmdline_record();
2124 tracing_stop_cmdline_record();
2128 * trace_vbprintk - write binary msg to tracing buffer
2131 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2133 struct trace_event_call *call = &event_bprint;
2134 struct ring_buffer_event *event;
2135 struct ring_buffer *buffer;
2136 struct trace_array *tr = &global_trace;
2137 struct bprint_entry *entry;
2138 unsigned long flags;
2140 int len = 0, size, pc;
2142 if (unlikely(tracing_selftest_running || tracing_disabled))
2145 /* Don't pollute graph traces with trace_vprintk internals */
2146 pause_graph_tracing();
2148 pc = preempt_count();
2149 preempt_disable_notrace();
2151 tbuffer = get_trace_buf();
2157 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2159 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2162 local_save_flags(flags);
2163 size = sizeof(*entry) + sizeof(u32) * len;
2164 buffer = tr->trace_buffer.buffer;
2165 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2169 entry = ring_buffer_event_data(event);
2173 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2174 if (!call_filter_check_discard(call, entry, buffer, event)) {
2175 __buffer_unlock_commit(buffer, event);
2176 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2180 preempt_enable_notrace();
2181 unpause_graph_tracing();
2185 EXPORT_SYMBOL_GPL(trace_vbprintk);
2188 __trace_array_vprintk(struct ring_buffer *buffer,
2189 unsigned long ip, const char *fmt, va_list args)
2191 struct trace_event_call *call = &event_print;
2192 struct ring_buffer_event *event;
2193 int len = 0, size, pc;
2194 struct print_entry *entry;
2195 unsigned long flags;
2198 if (tracing_disabled || tracing_selftest_running)
2201 /* Don't pollute graph traces with trace_vprintk internals */
2202 pause_graph_tracing();
2204 pc = preempt_count();
2205 preempt_disable_notrace();
2208 tbuffer = get_trace_buf();
2214 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2216 local_save_flags(flags);
2217 size = sizeof(*entry) + len + 1;
2218 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2222 entry = ring_buffer_event_data(event);
2225 memcpy(&entry->buf, tbuffer, len + 1);
2226 if (!call_filter_check_discard(call, entry, buffer, event)) {
2227 __buffer_unlock_commit(buffer, event);
2228 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2231 preempt_enable_notrace();
2232 unpause_graph_tracing();
2237 int trace_array_vprintk(struct trace_array *tr,
2238 unsigned long ip, const char *fmt, va_list args)
2240 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2243 int trace_array_printk(struct trace_array *tr,
2244 unsigned long ip, const char *fmt, ...)
2249 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2253 ret = trace_array_vprintk(tr, ip, fmt, ap);
2258 int trace_array_printk_buf(struct ring_buffer *buffer,
2259 unsigned long ip, const char *fmt, ...)
2264 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2268 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2273 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2275 return trace_array_vprintk(&global_trace, ip, fmt, args);
2277 EXPORT_SYMBOL_GPL(trace_vprintk);
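/*
 * Example (illustrative, not part of this file): the usual entry point
 * into the printk-style paths above is the trace_printk() macro, which
 * normally lands in the binary (vbprintk) path for constant formats.
 * "qid" and "delay_ms" are hypothetical variables:
 *
 *	trace_printk("queue %d stalled for %lu ms\n", qid, delay_ms);
 */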
2279 static void trace_iterator_increment(struct trace_iterator *iter)
2281 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2285 ring_buffer_read(buf_iter, NULL);
2288 static struct trace_entry *
2289 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2290 unsigned long *lost_events)
2292 struct ring_buffer_event *event;
2293 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2296 event = ring_buffer_iter_peek(buf_iter, ts);
2298 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2302 iter->ent_size = ring_buffer_event_length(event);
2303 return ring_buffer_event_data(event);
2309 static struct trace_entry *
2310 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2311 unsigned long *missing_events, u64 *ent_ts)
2313 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2314 struct trace_entry *ent, *next = NULL;
2315 unsigned long lost_events = 0, next_lost = 0;
2316 int cpu_file = iter->cpu_file;
2317 u64 next_ts = 0, ts;
2323 * If we are in a per_cpu trace file, don't bother iterating over
2324 * all cpus; peek directly.
2326 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2327 if (ring_buffer_empty_cpu(buffer, cpu_file))
2329 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2331 *ent_cpu = cpu_file;
2336 for_each_tracing_cpu(cpu) {
2338 if (ring_buffer_empty_cpu(buffer, cpu))
2341 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2344 * Pick the entry with the smallest timestamp:
2346 if (ent && (!next || ts < next_ts)) {
2350 next_lost = lost_events;
2351 next_size = iter->ent_size;
2355 iter->ent_size = next_size;
2358 *ent_cpu = next_cpu;
2364 *missing_events = next_lost;
2369 /* Find the next real entry, without updating the iterator itself */
2370 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2371 int *ent_cpu, u64 *ent_ts)
2373 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2376 /* Find the next real entry, and increment the iterator to the next entry */
2377 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2379 iter->ent = __find_next_entry(iter, &iter->cpu,
2380 &iter->lost_events, &iter->ts);
2383 trace_iterator_increment(iter);
2385 return iter->ent ? iter : NULL;
2388 static void trace_consume(struct trace_iterator *iter)
2390 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2391 &iter->lost_events);
2394 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2396 struct trace_iterator *iter = m->private;
2400 WARN_ON_ONCE(iter->leftover);
2404 /* can't go backwards */
2409 ent = trace_find_next_entry_inc(iter);
2413 while (ent && iter->idx < i)
2414 ent = trace_find_next_entry_inc(iter);
2421 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2423 struct ring_buffer_event *event;
2424 struct ring_buffer_iter *buf_iter;
2425 unsigned long entries = 0;
2428 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2430 buf_iter = trace_buffer_iter(iter, cpu);
2434 ring_buffer_iter_reset(buf_iter);
2437 * With the max latency tracers, we could have the case
2438 * that a reset never took place on a cpu. This shows up
2439 * as a timestamp before the start of the buffer.
2441 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2442 if (ts >= iter->trace_buffer->time_start)
2445 ring_buffer_read(buf_iter, NULL);
2448 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2452 * The current tracer is copied to avoid taking a global lock all around.
2455 static void *s_start(struct seq_file *m, loff_t *pos)
2457 struct trace_iterator *iter = m->private;
2458 struct trace_array *tr = iter->tr;
2459 int cpu_file = iter->cpu_file;
2465 * copy the tracer to avoid using a global lock all around.
2466 * iter->trace is a copy of current_trace, the pointer to the
2467 * name may be used instead of a strcmp(), as iter->trace->name
2468 * will point to the same string as current_trace->name.
2470 mutex_lock(&trace_types_lock);
2471 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2472 *iter->trace = *tr->current_trace;
2473 mutex_unlock(&trace_types_lock);
2475 #ifdef CONFIG_TRACER_MAX_TRACE
2476 if (iter->snapshot && iter->trace->use_max_tr)
2477 return ERR_PTR(-EBUSY);
2480 if (!iter->snapshot)
2481 atomic_inc(&trace_record_cmdline_disabled);
2483 if (*pos != iter->pos) {
2488 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2489 for_each_tracing_cpu(cpu)
2490 tracing_iter_reset(iter, cpu);
2492 tracing_iter_reset(iter, cpu_file);
2495 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2500 * If we overflowed the seq_file before, then we want
2501 * to just reuse the trace_seq buffer again.
2507 p = s_next(m, p, &l);
2511 trace_event_read_lock();
2512 trace_access_lock(cpu_file);
2516 static void s_stop(struct seq_file *m, void *p)
2518 struct trace_iterator *iter = m->private;
2520 #ifdef CONFIG_TRACER_MAX_TRACE
2521 if (iter->snapshot && iter->trace->use_max_tr)
2525 if (!iter->snapshot)
2526 atomic_dec(&trace_record_cmdline_disabled);
2528 trace_access_unlock(iter->cpu_file);
2529 trace_event_read_unlock();
2533 get_total_entries(struct trace_buffer *buf,
2534 unsigned long *total, unsigned long *entries)
2536 unsigned long count;
2542 for_each_tracing_cpu(cpu) {
2543 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2545 * If this buffer has skipped entries, then we hold all
2546 * entries for the trace and we need to ignore the
2547 * ones before the time stamp.
2549 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2550 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2551 /* total is the same as the entries */
2555 ring_buffer_overrun_cpu(buf->buffer, cpu);
2560 static void print_lat_help_header(struct seq_file *m)
2562 seq_puts(m, "# _--------=> CPU# \n"
2563 "# / _-------=> irqs-off \n"
2564 "# | / _------=> need-resched \n"
2565 "# || / _-----=> need-resched_lazy \n"
2566 "# ||| / _----=> hardirq/softirq \n"
2567 "# |||| / _---=> preempt-depth \n"
2568 "# ||||| / _--=> preempt-lazy-depth\n"
2569 "# |||||| / _-=> migrate-disable \n"
2570 "# ||||||| / delay \n"
2571 "# cmd pid |||||||| time | caller \n"
2572 "# \\ / |||||||| \\ | / \n");
2575 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2577 unsigned long total;
2578 unsigned long entries;
2580 get_total_entries(buf, &total, &entries);
2581 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2582 entries, total, num_online_cpus());
2586 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2588 print_event_info(buf, m);
2589 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2593 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2595 print_event_info(buf, m);
2596 seq_puts(m, "# _-----=> irqs-off\n"
2597 "# / _----=> need-resched\n"
2598 "# |/ _-----=> need-resched_lazy\n"
2599 "# || / _---=> hardirq/softirq\n"
2600 "# ||| / _--=> preempt-depth\n"
2601 "# |||| / _-=> preempt-lazy-depth\n"
2602 "# ||||| / _-=> migrate-disable \n"
2603 "# |||||| / delay\n"
2604 "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n"
2605 "# | | | ||||||| | |\n");
2609 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2611 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2612 struct trace_buffer *buf = iter->trace_buffer;
2613 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2614 struct tracer *type = iter->trace;
2615 unsigned long entries;
2616 unsigned long total;
2617 const char *name = "preemption";
2621 get_total_entries(buf, &total, &entries);
2623 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2625 seq_puts(m, "# -----------------------------------"
2626 "---------------------------------\n");
2627 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2628 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2629 nsecs_to_usecs(data->saved_latency),
2633 #if defined(CONFIG_PREEMPT_NONE)
2635 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2637 #elif defined(CONFIG_PREEMPT)
2642 /* These are reserved for later use */
2645 seq_printf(m, " #P:%d)\n", num_online_cpus());
2649 seq_puts(m, "# -----------------\n");
2650 seq_printf(m, "# | task: %.16s-%d "
2651 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2652 data->comm, data->pid,
2653 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2654 data->policy, data->rt_priority);
2655 seq_puts(m, "# -----------------\n");
2657 if (data->critical_start) {
2658 seq_puts(m, "# => started at: ");
2659 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2660 trace_print_seq(m, &iter->seq);
2661 seq_puts(m, "\n# => ended at: ");
2662 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2663 trace_print_seq(m, &iter->seq);
2664 seq_puts(m, "\n#\n");
2670 static void test_cpu_buff_start(struct trace_iterator *iter)
2672 struct trace_seq *s = &iter->seq;
2673 struct trace_array *tr = iter->tr;
2675 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2678 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2681 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2684 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2688 cpumask_set_cpu(iter->cpu, iter->started);
2690 /* Don't print started cpu buffer for the first entry of the trace */
2692 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2696 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2698 struct trace_array *tr = iter->tr;
2699 struct trace_seq *s = &iter->seq;
2700 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2701 struct trace_entry *entry;
2702 struct trace_event *event;
2706 test_cpu_buff_start(iter);
2708 event = ftrace_find_event(entry->type);
2710 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2711 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2712 trace_print_lat_context(iter);
2714 trace_print_context(iter);
2717 if (trace_seq_has_overflowed(s))
2718 return TRACE_TYPE_PARTIAL_LINE;
2721 return event->funcs->trace(iter, sym_flags, event);
2723 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2725 return trace_handle_return(s);
2728 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2730 struct trace_array *tr = iter->tr;
2731 struct trace_seq *s = &iter->seq;
2732 struct trace_entry *entry;
2733 struct trace_event *event;
2737 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2738 trace_seq_printf(s, "%d %d %llu ",
2739 entry->pid, iter->cpu, iter->ts);
2741 if (trace_seq_has_overflowed(s))
2742 return TRACE_TYPE_PARTIAL_LINE;
2744 event = ftrace_find_event(entry->type);
2746 return event->funcs->raw(iter, 0, event);
2748 trace_seq_printf(s, "%d ?\n", entry->type);
2750 return trace_handle_return(s);
2753 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2755 struct trace_array *tr = iter->tr;
2756 struct trace_seq *s = &iter->seq;
2757 unsigned char newline = '\n';
2758 struct trace_entry *entry;
2759 struct trace_event *event;
2763 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2764 SEQ_PUT_HEX_FIELD(s, entry->pid);
2765 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2766 SEQ_PUT_HEX_FIELD(s, iter->ts);
2767 if (trace_seq_has_overflowed(s))
2768 return TRACE_TYPE_PARTIAL_LINE;
2771 event = ftrace_find_event(entry->type);
2773 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2774 if (ret != TRACE_TYPE_HANDLED)
2778 SEQ_PUT_FIELD(s, newline);
2780 return trace_handle_return(s);
2783 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2785 struct trace_array *tr = iter->tr;
2786 struct trace_seq *s = &iter->seq;
2787 struct trace_entry *entry;
2788 struct trace_event *event;
2792 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2793 SEQ_PUT_FIELD(s, entry->pid);
2794 SEQ_PUT_FIELD(s, iter->cpu);
2795 SEQ_PUT_FIELD(s, iter->ts);
2796 if (trace_seq_has_overflowed(s))
2797 return TRACE_TYPE_PARTIAL_LINE;
2800 event = ftrace_find_event(entry->type);
2801 return event ? event->funcs->binary(iter, 0, event) :
2805 int trace_empty(struct trace_iterator *iter)
2807 struct ring_buffer_iter *buf_iter;
2810 /* If we are looking at one CPU buffer, only check that one */
2811 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2812 cpu = iter->cpu_file;
2813 buf_iter = trace_buffer_iter(iter, cpu);
2815 if (!ring_buffer_iter_empty(buf_iter))
2818 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2824 for_each_tracing_cpu(cpu) {
2825 buf_iter = trace_buffer_iter(iter, cpu);
2827 if (!ring_buffer_iter_empty(buf_iter))
2830 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2838 /* Called with trace_event_read_lock() held. */
2839 enum print_line_t print_trace_line(struct trace_iterator *iter)
2841 struct trace_array *tr = iter->tr;
2842 unsigned long trace_flags = tr->trace_flags;
2843 enum print_line_t ret;
2845 if (iter->lost_events) {
2846 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2847 iter->cpu, iter->lost_events);
2848 if (trace_seq_has_overflowed(&iter->seq))
2849 return TRACE_TYPE_PARTIAL_LINE;
2852 if (iter->trace && iter->trace->print_line) {
2853 ret = iter->trace->print_line(iter);
2854 if (ret != TRACE_TYPE_UNHANDLED)
2858 if (iter->ent->type == TRACE_BPUTS &&
2859 trace_flags & TRACE_ITER_PRINTK &&
2860 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2861 return trace_print_bputs_msg_only(iter);
2863 if (iter->ent->type == TRACE_BPRINT &&
2864 trace_flags & TRACE_ITER_PRINTK &&
2865 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2866 return trace_print_bprintk_msg_only(iter);
2868 if (iter->ent->type == TRACE_PRINT &&
2869 trace_flags & TRACE_ITER_PRINTK &&
2870 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2871 return trace_print_printk_msg_only(iter);
2873 if (trace_flags & TRACE_ITER_BIN)
2874 return print_bin_fmt(iter);
2876 if (trace_flags & TRACE_ITER_HEX)
2877 return print_hex_fmt(iter);
2879 if (trace_flags & TRACE_ITER_RAW)
2880 return print_raw_fmt(iter);
2882 return print_trace_fmt(iter);
2885 void trace_latency_header(struct seq_file *m)
2887 struct trace_iterator *iter = m->private;
2888 struct trace_array *tr = iter->tr;
2890 /* print nothing if the buffers are empty */
2891 if (trace_empty(iter))
2894 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2895 print_trace_header(m, iter);
2897 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2898 print_lat_help_header(m);
2901 void trace_default_header(struct seq_file *m)
2903 struct trace_iterator *iter = m->private;
2904 struct trace_array *tr = iter->tr;
2905 unsigned long trace_flags = tr->trace_flags;
2907 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2910 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2911 /* print nothing if the buffers are empty */
2912 if (trace_empty(iter))
2914 print_trace_header(m, iter);
2915 if (!(trace_flags & TRACE_ITER_VERBOSE))
2916 print_lat_help_header(m);
2918 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2919 if (trace_flags & TRACE_ITER_IRQ_INFO)
2920 print_func_help_header_irq(iter->trace_buffer, m);
2922 print_func_help_header(iter->trace_buffer, m);
2927 static void test_ftrace_alive(struct seq_file *m)
2929 if (!ftrace_is_dead())
2931 seq_puts(m, "# WARNING: FUNCTION TRACING IS DISABLED\n"
2932 "# MAY BE MISSING FUNCTION EVENTS\n");
2935 #ifdef CONFIG_TRACER_MAX_TRACE
2936 static void show_snapshot_main_help(struct seq_file *m)
2938 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2939 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2940 "# Takes a snapshot of the main buffer.\n"
2941 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2942 "# (Doesn't have to be '2' works with any number that\n"
2943 "# is not a '0' or '1')\n");
2946 static void show_snapshot_percpu_help(struct seq_file *m)
2948 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2949 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2950 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2951 "# Takes a snapshot of the main buffer for this cpu.\n");
2953 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2954 "# Must use main snapshot file to allocate.\n");
2956 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2957 "# (Doesn't have to be '2' works with any number that\n"
2958 "# is not a '0' or '1')\n");
2961 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2963 if (iter->tr->allocated_snapshot)
2964 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2966 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2968 seq_puts(m, "# Snapshot commands:\n");
2969 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2970 show_snapshot_main_help(m);
2972 show_snapshot_percpu_help(m);
2975 /* Should never be called */
2976 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
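/*
 * Example snapshot usage from user space (a sketch; assumes tracefs is
 * mounted at /sys/kernel/tracing and CONFIG_TRACER_SNAPSHOT is enabled),
 * following the help text printed above:
 *
 *     echo 1 > /sys/kernel/tracing/snapshot    allocate and take a snapshot
 *     cat /sys/kernel/tracing/snapshot         read the frozen copy while the
 *                                              main buffer keeps tracing
 *     echo 0 > /sys/kernel/tracing/snapshot    clear and free the snapshot
 */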
2979 static int s_show(struct seq_file *m, void *v)
2981 struct trace_iterator *iter = v;
2984 if (iter->ent == NULL) {
2986 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2988 test_ftrace_alive(m);
2990 if (iter->snapshot && trace_empty(iter))
2991 print_snapshot_help(m, iter);
2992 else if (iter->trace && iter->trace->print_header)
2993 iter->trace->print_header(m);
2995 trace_default_header(m);
2997 } else if (iter->leftover) {
2999 * If we filled the seq_file buffer earlier, we
3000 * want to just show it now.
3002 ret = trace_print_seq(m, &iter->seq);
3004 /* ret should this time be zero, but you never know */
3005 iter->leftover = ret;
3008 print_trace_line(iter);
3009 ret = trace_print_seq(m, &iter->seq);
3011 * If we overflow the seq_file buffer, then it will
3012 * ask us for this data again at start up.
3014 * ret is 0 if seq_file write succeeded.
3017 iter->leftover = ret;
3024 * Should be used after trace_array_get(), trace_types_lock
3025 * ensures that i_cdev was already initialized.
3027 static inline int tracing_get_cpu(struct inode *inode)
3029 if (inode->i_cdev) /* See trace_create_cpu_file() */
3030 return (long)inode->i_cdev - 1;
3031 return RING_BUFFER_ALL_CPUS;
3034 static const struct seq_operations tracer_seq_ops = {
3041 static struct trace_iterator *
3042 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3044 struct trace_array *tr = inode->i_private;
3045 struct trace_iterator *iter;
3048 if (tracing_disabled)
3049 return ERR_PTR(-ENODEV);
3051 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3053 return ERR_PTR(-ENOMEM);
3055 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3057 if (!iter->buffer_iter)
3061 * We make a copy of the current tracer to avoid concurrent
3062 * changes on it while we are reading.
3064 mutex_lock(&trace_types_lock);
3065 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3069 *iter->trace = *tr->current_trace;
3071 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3076 #ifdef CONFIG_TRACER_MAX_TRACE
3077 /* Currently only the top directory has a snapshot */
3078 if (tr->current_trace->print_max || snapshot)
3079 iter->trace_buffer = &tr->max_buffer;
3082 iter->trace_buffer = &tr->trace_buffer;
3083 iter->snapshot = snapshot;
3085 iter->cpu_file = tracing_get_cpu(inode);
3086 mutex_init(&iter->mutex);
3088 /* Notify the tracer early; before we stop tracing. */
3089 if (iter->trace && iter->trace->open)
3090 iter->trace->open(iter);
3092 /* Annotate start of buffers if we had overruns */
3093 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3094 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3096 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3097 if (trace_clocks[tr->clock_id].in_ns)
3098 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3100 /* stop the trace while dumping if we are not opening "snapshot" */
3101 if (!iter->snapshot)
3102 tracing_stop_tr(tr);
3104 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3105 for_each_tracing_cpu(cpu) {
3106 iter->buffer_iter[cpu] =
3107 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3109 ring_buffer_read_prepare_sync();
3110 for_each_tracing_cpu(cpu) {
3111 ring_buffer_read_start(iter->buffer_iter[cpu]);
3112 tracing_iter_reset(iter, cpu);
3115 cpu = iter->cpu_file;
3116 iter->buffer_iter[cpu] =
3117 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3118 ring_buffer_read_prepare_sync();
3119 ring_buffer_read_start(iter->buffer_iter[cpu]);
3120 tracing_iter_reset(iter, cpu);
3123 mutex_unlock(&trace_types_lock);
3128 mutex_unlock(&trace_types_lock);
3130 kfree(iter->buffer_iter);
3132 seq_release_private(inode, file);
3133 return ERR_PTR(-ENOMEM);
3136 int tracing_open_generic(struct inode *inode, struct file *filp)
3138 if (tracing_disabled)
3141 filp->private_data = inode->i_private;
3145 bool tracing_is_disabled(void)
3147 return tracing_disabled ? true : false;
3151 * Open and update trace_array ref count.
3152 * Must have the current trace_array passed to it.
3154 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3156 struct trace_array *tr = inode->i_private;
3158 if (tracing_disabled)
3161 if (trace_array_get(tr) < 0)
3164 filp->private_data = inode->i_private;
3169 static int tracing_release(struct inode *inode, struct file *file)
3171 struct trace_array *tr = inode->i_private;
3172 struct seq_file *m = file->private_data;
3173 struct trace_iterator *iter;
3176 if (!(file->f_mode & FMODE_READ)) {
3177 trace_array_put(tr);
3181 /* Writes do not use seq_file */
3183 mutex_lock(&trace_types_lock);
3185 for_each_tracing_cpu(cpu) {
3186 if (iter->buffer_iter[cpu])
3187 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3190 if (iter->trace && iter->trace->close)
3191 iter->trace->close(iter);
3193 if (!iter->snapshot)
3194 /* reenable tracing if it was previously enabled */
3195 tracing_start_tr(tr);
3197 __trace_array_put(tr);
3199 mutex_unlock(&trace_types_lock);
3201 mutex_destroy(&iter->mutex);
3202 free_cpumask_var(iter->started);
3204 kfree(iter->buffer_iter);
3205 seq_release_private(inode, file);
3210 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3212 struct trace_array *tr = inode->i_private;
3214 trace_array_put(tr);
3218 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3220 struct trace_array *tr = inode->i_private;
3222 trace_array_put(tr);
3224 return single_release(inode, file);
3227 static int tracing_open(struct inode *inode, struct file *file)
3229 struct trace_array *tr = inode->i_private;
3230 struct trace_iterator *iter;
3233 if (trace_array_get(tr) < 0)
3236 /* If this file was open for write, then erase contents */
3237 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3238 int cpu = tracing_get_cpu(inode);
3240 if (cpu == RING_BUFFER_ALL_CPUS)
3241 tracing_reset_online_cpus(&tr->trace_buffer);
3243 tracing_reset(&tr->trace_buffer, cpu);
3246 if (file->f_mode & FMODE_READ) {
3247 iter = __tracing_open(inode, file, false);
3249 ret = PTR_ERR(iter);
3250 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3251 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3255 trace_array_put(tr);
3261 * Some tracers are not suitable for instance buffers.
3262 * A tracer is always available for the global array (toplevel)
3263 * or if it explicitly states that it is.
3266 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3268 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
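/*
 * Example (a sketch, assuming tracefs at /sys/kernel/tracing): instance
 * buffers are created and removed from user space, and only the global
 * array or tracers that set ->allow_instances are selectable there:
 *
 *     mkdir /sys/kernel/tracing/instances/foo
 *     cat /sys/kernel/tracing/instances/foo/available_tracers
 *     rmdir /sys/kernel/tracing/instances/foo
 */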
3271 /* Find the next tracer that this trace array may use */
3272 static struct tracer *
3273 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3275 while (t && !trace_ok_for_array(t, tr))
3282 t_next(struct seq_file *m, void *v, loff_t *pos)
3284 struct trace_array *tr = m->private;
3285 struct tracer *t = v;
3290 t = get_tracer_for_array(tr, t->next);
3295 static void *t_start(struct seq_file *m, loff_t *pos)
3297 struct trace_array *tr = m->private;
3301 mutex_lock(&trace_types_lock);
3303 t = get_tracer_for_array(tr, trace_types);
3304 for (; t && l < *pos; t = t_next(m, t, &l))
3310 static void t_stop(struct seq_file *m, void *p)
3312 mutex_unlock(&trace_types_lock);
3315 static int t_show(struct seq_file *m, void *v)
3317 struct tracer *t = v;
3322 seq_puts(m, t->name);
3331 static const struct seq_operations show_traces_seq_ops = {
3338 static int show_traces_open(struct inode *inode, struct file *file)
3340 struct trace_array *tr = inode->i_private;
3344 if (tracing_disabled)
3347 ret = seq_open(file, &show_traces_seq_ops);
3351 m = file->private_data;
3358 tracing_write_stub(struct file *filp, const char __user *ubuf,
3359 size_t count, loff_t *ppos)
3364 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3368 if (file->f_mode & FMODE_READ)
3369 ret = seq_lseek(file, offset, whence);
3371 file->f_pos = ret = 0;
3376 static const struct file_operations tracing_fops = {
3377 .open = tracing_open,
3379 .write = tracing_write_stub,
3380 .llseek = tracing_lseek,
3381 .release = tracing_release,
3384 static const struct file_operations show_traces_fops = {
3385 .open = show_traces_open,
3387 .release = seq_release,
3388 .llseek = seq_lseek,
3392 * The tracer itself will not take this lock, but still we want
3393 * to provide a consistent cpumask to user-space:
3395 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3398 * Temporary storage for the character representation of the
3399 * CPU bitmask (and one more byte for the newline):
3401 static char mask_str[NR_CPUS + 1];
3404 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3405 size_t count, loff_t *ppos)
3407 struct trace_array *tr = file_inode(filp)->i_private;
3410 mutex_lock(&tracing_cpumask_update_lock);
3412 len = snprintf(mask_str, count, "%*pb\n",
3413 cpumask_pr_args(tr->tracing_cpumask));
3418 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3421 mutex_unlock(&tracing_cpumask_update_lock);
3427 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3428 size_t count, loff_t *ppos)
3430 struct trace_array *tr = file_inode(filp)->i_private;
3431 cpumask_var_t tracing_cpumask_new;
3434 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3437 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3441 mutex_lock(&tracing_cpumask_update_lock);
3443 local_irq_disable();
3444 arch_spin_lock(&tr->max_lock);
3445 for_each_tracing_cpu(cpu) {
3447 * Increase/decrease the disabled counter if we are
3448 * about to flip a bit in the cpumask:
3450 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3451 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3452 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3453 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3455 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3456 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3457 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3458 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3461 arch_spin_unlock(&tr->max_lock);
3464 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3466 mutex_unlock(&tracing_cpumask_update_lock);
3467 free_cpumask_var(tracing_cpumask_new);
3472 free_cpumask_var(tracing_cpumask_new);
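/*
 * Example (a sketch): tracing_cpumask takes a hex CPU mask, so on a 4-CPU
 * system limiting tracing to CPUs 0 and 1 and then restoring all CPUs
 * would be:
 *
 *     echo 3 > /sys/kernel/tracing/tracing_cpumask
 *     echo f > /sys/kernel/tracing/tracing_cpumask
 *
 * Bits cleared here stop recording on those CPUs via
 * ring_buffer_record_disable_cpu() above.
 */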
3477 static const struct file_operations tracing_cpumask_fops = {
3478 .open = tracing_open_generic_tr,
3479 .read = tracing_cpumask_read,
3480 .write = tracing_cpumask_write,
3481 .release = tracing_release_generic_tr,
3482 .llseek = generic_file_llseek,
3485 static int tracing_trace_options_show(struct seq_file *m, void *v)
3487 struct tracer_opt *trace_opts;
3488 struct trace_array *tr = m->private;
3492 mutex_lock(&trace_types_lock);
3493 tracer_flags = tr->current_trace->flags->val;
3494 trace_opts = tr->current_trace->flags->opts;
3496 for (i = 0; trace_options[i]; i++) {
3497 if (tr->trace_flags & (1 << i))
3498 seq_printf(m, "%s\n", trace_options[i]);
3500 seq_printf(m, "no%s\n", trace_options[i]);
3503 for (i = 0; trace_opts[i].name; i++) {
3504 if (tracer_flags & trace_opts[i].bit)
3505 seq_printf(m, "%s\n", trace_opts[i].name);
3507 seq_printf(m, "no%s\n", trace_opts[i].name);
3509 mutex_unlock(&trace_types_lock);
3514 static int __set_tracer_option(struct trace_array *tr,
3515 struct tracer_flags *tracer_flags,
3516 struct tracer_opt *opts, int neg)
3518 struct tracer *trace = tr->current_trace;
3521 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3526 tracer_flags->val &= ~opts->bit;
3528 tracer_flags->val |= opts->bit;
3532 /* Try to assign a tracer specific option */
3533 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3535 struct tracer *trace = tr->current_trace;
3536 struct tracer_flags *tracer_flags = trace->flags;
3537 struct tracer_opt *opts = NULL;
3540 for (i = 0; tracer_flags->opts[i].name; i++) {
3541 opts = &tracer_flags->opts[i];
3543 if (strcmp(cmp, opts->name) == 0)
3544 return __set_tracer_option(tr, trace->flags, opts, neg);
3550 /* Some tracers require overwrite to stay enabled */
3551 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3553 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3559 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3561 /* do nothing if flag is already set */
3562 if (!!(tr->trace_flags & mask) == !!enabled)
3565 /* Give the tracer a chance to approve the change */
3566 if (tr->current_trace->flag_changed)
3567 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3571 tr->trace_flags |= mask;
3573 tr->trace_flags &= ~mask;
3575 if (mask == TRACE_ITER_RECORD_CMD)
3576 trace_event_enable_cmd_record(enabled);
3578 if (mask == TRACE_ITER_OVERWRITE) {
3579 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3580 #ifdef CONFIG_TRACER_MAX_TRACE
3581 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3585 if (mask == TRACE_ITER_PRINTK) {
3586 trace_printk_start_stop_comm(enabled);
3587 trace_printk_control(enabled);
3593 static int trace_set_options(struct trace_array *tr, char *option)
3599 size_t orig_len = strlen(option);
3601 cmp = strstrip(option);
3603 if (strncmp(cmp, "no", 2) == 0) {
3608 mutex_lock(&trace_types_lock);
3610 for (i = 0; trace_options[i]; i++) {
3611 if (strcmp(cmp, trace_options[i]) == 0) {
3612 ret = set_tracer_flag(tr, 1 << i, !neg);
3617 /* If no option could be set, test the specific tracer options */
3618 if (!trace_options[i])
3619 ret = set_tracer_option(tr, cmp, neg);
3621 mutex_unlock(&trace_types_lock);
3624 * If the first trailing whitespace is replaced with '\0' by strstrip,
3625 * turn it back into a space.
3627 if (orig_len > strlen(option))
3628 option[strlen(option)] = ' ';
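/*
 * Example (a sketch): options are toggled by writing the option name, or
 * the name prefixed with "no", to trace_options; the same comma-separated
 * strings can also be given at boot via the trace_options= parameter,
 * which is applied by apply_trace_boot_options() below:
 *
 *     echo stacktrace > /sys/kernel/tracing/trace_options
 *     echo nostacktrace > /sys/kernel/tracing/trace_options
 */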
3633 static void __init apply_trace_boot_options(void)
3635 char *buf = trace_boot_options_buf;
3639 option = strsep(&buf, ",");
3645 trace_set_options(&global_trace, option);
3647 /* Put back the comma to allow this to be called again */
3654 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3655 size_t cnt, loff_t *ppos)
3657 struct seq_file *m = filp->private_data;
3658 struct trace_array *tr = m->private;
3662 if (cnt >= sizeof(buf))
3665 if (copy_from_user(&buf, ubuf, cnt))
3670 ret = trace_set_options(tr, buf);
3679 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3681 struct trace_array *tr = inode->i_private;
3684 if (tracing_disabled)
3687 if (trace_array_get(tr) < 0)
3690 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3692 trace_array_put(tr);
3697 static const struct file_operations tracing_iter_fops = {
3698 .open = tracing_trace_options_open,
3700 .llseek = seq_lseek,
3701 .release = tracing_single_release_tr,
3702 .write = tracing_trace_options_write,
3705 static const char readme_msg[] =
3706 "tracing mini-HOWTO:\n\n"
3707 "# echo 0 > tracing_on : quick way to disable tracing\n"
3708 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3709 " Important files:\n"
3710 " trace\t\t\t- The static contents of the buffer\n"
3711 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3712 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3713 " current_tracer\t- function and latency tracers\n"
3714 " available_tracers\t- list of configured tracers for current_tracer\n"
3715 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3716 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3717 " trace_clock\t\t-change the clock used to order events\n"
3718 " local: Per cpu clock but may not be synced across CPUs\n"
3719 " global: Synced across CPUs but slows tracing down.\n"
3720 " counter: Not a clock, but just an increment\n"
3721 " uptime: Jiffy counter from time of boot\n"
3722 " perf: Same clock that perf events use\n"
3723 #ifdef CONFIG_X86_64
3724 " x86-tsc: TSC cycle counter\n"
3726 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3727 " tracing_cpumask\t- Limit which CPUs to trace\n"
3728 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3729 "\t\t\t Remove sub-buffer with rmdir\n"
3730 " trace_options\t\t- Set format or modify how tracing happens\n"
3731 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3732 "\t\t\t option name\n"
3733 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3734 #ifdef CONFIG_DYNAMIC_FTRACE
3735 "\n available_filter_functions - list of functions that can be filtered on\n"
3736 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3737 "\t\t\t functions\n"
3738 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3739 "\t modules: Can select a group via module\n"
3740 "\t Format: :mod:<module-name>\n"
3741 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3742 "\t triggers: a command to perform when function is hit\n"
3743 "\t Format: <function>:<trigger>[:count]\n"
3744 "\t trigger: traceon, traceoff\n"
3745 "\t\t enable_event:<system>:<event>\n"
3746 "\t\t disable_event:<system>:<event>\n"
3747 #ifdef CONFIG_STACKTRACE
3750 #ifdef CONFIG_TRACER_SNAPSHOT
3755 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3756 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3757 "\t The first one will disable tracing every time do_fault is hit\n"
3758 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3759 "\t The first time do trap is hit and it disables tracing, the\n"
3760 "\t counter will decrement to 2. If tracing is already disabled,\n"
3761 "\t the counter will not decrement. It only decrements when the\n"
3762 "\t trigger did work\n"
3763 "\t To remove trigger without count:\n"
3764 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3765 "\t To remove trigger with a count:\n"
3766 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3767 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3768 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3769 "\t modules: Can select a group via module command :mod:\n"
3770 "\t Does not accept triggers\n"
3771 #endif /* CONFIG_DYNAMIC_FTRACE */
3772 #ifdef CONFIG_FUNCTION_TRACER
3773 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3776 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3777 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3778 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3779 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3781 #ifdef CONFIG_TRACER_SNAPSHOT
3782 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3783 "\t\t\t snapshot buffer. Read the contents for more\n"
3784 "\t\t\t information\n"
3786 #ifdef CONFIG_STACK_TRACER
3787 " stack_trace\t\t- Shows the max stack trace when active\n"
3788 " stack_max_size\t- Shows current max stack size that was traced\n"
3789 "\t\t\t Write into this file to reset the max size (trigger a\n"
3790 "\t\t\t new trace)\n"
3791 #ifdef CONFIG_DYNAMIC_FTRACE
3792 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3795 #endif /* CONFIG_STACK_TRACER */
3796 " events/\t\t- Directory containing all trace event subsystems:\n"
3797 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3798 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3799 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3801 " filter\t\t- If set, only events passing filter are traced\n"
3802 " events/<system>/<event>/\t- Directory containing control files for\n"
3804 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3805 " filter\t\t- If set, only events passing filter are traced\n"
3806 " trigger\t\t- If set, a command to perform when event is hit\n"
3807 "\t Format: <trigger>[:count][if <filter>]\n"
3808 "\t trigger: traceon, traceoff\n"
3809 "\t enable_event:<system>:<event>\n"
3810 "\t disable_event:<system>:<event>\n"
3811 #ifdef CONFIG_STACKTRACE
3814 #ifdef CONFIG_TRACER_SNAPSHOT
3817 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3818 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3819 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3820 "\t events/block/block_unplug/trigger\n"
3821 "\t The first disables tracing every time block_unplug is hit.\n"
3822 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3823 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3824 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3825 "\t Like function triggers, the counter is only decremented if it\n"
3826 "\t enabled or disabled tracing.\n"
3827 "\t To remove a trigger without a count:\n"
3828 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3829 "\t To remove a trigger with a count:\n"
3830 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3831 "\t Filters can be ignored when removing a trigger.\n"
3835 tracing_readme_read(struct file *filp, char __user *ubuf,
3836 size_t cnt, loff_t *ppos)
3838 return simple_read_from_buffer(ubuf, cnt, ppos,
3839 readme_msg, strlen(readme_msg));
3842 static const struct file_operations tracing_readme_fops = {
3843 .open = tracing_open_generic,
3844 .read = tracing_readme_read,
3845 .llseek = generic_file_llseek,
3848 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3850 unsigned int *ptr = v;
3852 if (*pos || m->count)
3857 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3859 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3868 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3874 arch_spin_lock(&trace_cmdline_lock);
3876 v = &savedcmd->map_cmdline_to_pid[0];
3878 v = saved_cmdlines_next(m, v, &l);
3886 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3888 arch_spin_unlock(&trace_cmdline_lock);
3892 static int saved_cmdlines_show(struct seq_file *m, void *v)
3894 char buf[TASK_COMM_LEN];
3895 unsigned int *pid = v;
3897 __trace_find_cmdline(*pid, buf);
3898 seq_printf(m, "%d %s\n", *pid, buf);
3902 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3903 .start = saved_cmdlines_start,
3904 .next = saved_cmdlines_next,
3905 .stop = saved_cmdlines_stop,
3906 .show = saved_cmdlines_show,
3909 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3911 if (tracing_disabled)
3914 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3917 static const struct file_operations tracing_saved_cmdlines_fops = {
3918 .open = tracing_saved_cmdlines_open,
3920 .llseek = seq_lseek,
3921 .release = seq_release,
3925 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3926 size_t cnt, loff_t *ppos)
3931 arch_spin_lock(&trace_cmdline_lock);
3932 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3933 arch_spin_unlock(&trace_cmdline_lock);
3935 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3938 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3940 kfree(s->saved_cmdlines);
3941 kfree(s->map_cmdline_to_pid);
3945 static int tracing_resize_saved_cmdlines(unsigned int val)
3947 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3949 s = kmalloc(sizeof(*s), GFP_KERNEL);
3953 if (allocate_cmdlines_buffer(val, s) < 0) {
3958 arch_spin_lock(&trace_cmdline_lock);
3959 savedcmd_temp = savedcmd;
3961 arch_spin_unlock(&trace_cmdline_lock);
3962 free_saved_cmdlines_buffer(savedcmd_temp);
3968 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3969 size_t cnt, loff_t *ppos)
3974 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3978 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
3979 if (!val || val > PID_MAX_DEFAULT)
3982 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3991 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3992 .open = tracing_open_generic,
3993 .read = tracing_saved_cmdlines_size_read,
3994 .write = tracing_saved_cmdlines_size_write,
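/*
 * Example (a sketch): the saved_cmdlines map defaults to a small number of
 * entries; raising it helps when many short-lived tasks would otherwise
 * show up without a resolved comm in the output:
 *
 *     echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 *     cat /sys/kernel/tracing/saved_cmdlines
 */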
3997 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
3998 static union trace_enum_map_item *
3999 update_enum_map(union trace_enum_map_item *ptr)
4001 if (!ptr->map.enum_string) {
4002 if (ptr->tail.next) {
4003 ptr = ptr->tail.next;
4004 /* Set ptr to the next real item (skip head) */
4012 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4014 union trace_enum_map_item *ptr = v;
4017 * Paranoid! If ptr points to end, we don't want to increment past it.
4018 * This really should never happen.
4020 ptr = update_enum_map(ptr);
4021 if (WARN_ON_ONCE(!ptr))
4028 ptr = update_enum_map(ptr);
4033 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4035 union trace_enum_map_item *v;
4038 mutex_lock(&trace_enum_mutex);
4040 v = trace_enum_maps;
4044 while (v && l < *pos) {
4045 v = enum_map_next(m, v, &l);
4051 static void enum_map_stop(struct seq_file *m, void *v)
4053 mutex_unlock(&trace_enum_mutex);
4056 static int enum_map_show(struct seq_file *m, void *v)
4058 union trace_enum_map_item *ptr = v;
4060 seq_printf(m, "%s %ld (%s)\n",
4061 ptr->map.enum_string, ptr->map.enum_value,
4067 static const struct seq_operations tracing_enum_map_seq_ops = {
4068 .start = enum_map_start,
4069 .next = enum_map_next,
4070 .stop = enum_map_stop,
4071 .show = enum_map_show,
4074 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4076 if (tracing_disabled)
4079 return seq_open(filp, &tracing_enum_map_seq_ops);
4082 static const struct file_operations tracing_enum_map_fops = {
4083 .open = tracing_enum_map_open,
4085 .llseek = seq_lseek,
4086 .release = seq_release,
4089 static inline union trace_enum_map_item *
4090 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4092 /* Return tail of array given the head */
4093 return ptr + ptr->head.length + 1;
4097 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4100 struct trace_enum_map **stop;
4101 struct trace_enum_map **map;
4102 union trace_enum_map_item *map_array;
4103 union trace_enum_map_item *ptr;
4108 * The trace_enum_maps contains the map plus a head and tail item,
4109 * where the head holds the module and length of array, and the
4110 * tail holds a pointer to the next list.
4112 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4114 pr_warning("Unable to allocate trace enum mapping\n");
4118 mutex_lock(&trace_enum_mutex);
4120 if (!trace_enum_maps)
4121 trace_enum_maps = map_array;
4123 ptr = trace_enum_maps;
4125 ptr = trace_enum_jmp_to_tail(ptr);
4126 if (!ptr->tail.next)
4128 ptr = ptr->tail.next;
4131 ptr->tail.next = map_array;
4133 map_array->head.mod = mod;
4134 map_array->head.length = len;
4137 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4138 map_array->map = **map;
4141 memset(map_array, 0, sizeof(*map_array));
4143 mutex_unlock(&trace_enum_mutex);
4146 static void trace_create_enum_file(struct dentry *d_tracer)
4148 trace_create_file("enum_map", 0444, d_tracer,
4149 NULL, &tracing_enum_map_fops);
4152 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4153 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4154 static inline void trace_insert_enum_map_file(struct module *mod,
4155 struct trace_enum_map **start, int len) { }
4156 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
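/*
 * Layout sketch of one trace_enum_maps block as built above (each cell is
 * a union trace_enum_map_item; len is the length of the module's map array):
 *
 *     [ head: mod, length=len ][ map 0 ][ map 1 ] ... [ map len-1 ][ tail: next ]
 *
 * trace_enum_jmp_to_tail() skips from the head cell over the len maps to
 * the tail cell, and tail.next chains to the next module's block.
 */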
4158 static void trace_insert_enum_map(struct module *mod,
4159 struct trace_enum_map **start, int len)
4161 struct trace_enum_map **map;
4168 trace_event_enum_update(map, len);
4170 trace_insert_enum_map_file(mod, start, len);
4174 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4175 size_t cnt, loff_t *ppos)
4177 struct trace_array *tr = filp->private_data;
4178 char buf[MAX_TRACER_SIZE+2];
4181 mutex_lock(&trace_types_lock);
4182 r = sprintf(buf, "%s\n", tr->current_trace->name);
4183 mutex_unlock(&trace_types_lock);
4185 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4188 int tracer_init(struct tracer *t, struct trace_array *tr)
4190 tracing_reset_online_cpus(&tr->trace_buffer);
4194 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4198 for_each_tracing_cpu(cpu)
4199 per_cpu_ptr(buf->data, cpu)->entries = val;
4202 #ifdef CONFIG_TRACER_MAX_TRACE
4203 /* resize @trace_buf's buffer to the size of @size_buf's entries */
4204 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4205 struct trace_buffer *size_buf, int cpu_id)
4209 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4210 for_each_tracing_cpu(cpu) {
4211 ret = ring_buffer_resize(trace_buf->buffer,
4212 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4215 per_cpu_ptr(trace_buf->data, cpu)->entries =
4216 per_cpu_ptr(size_buf->data, cpu)->entries;
4219 ret = ring_buffer_resize(trace_buf->buffer,
4220 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4222 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4223 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4228 #endif /* CONFIG_TRACER_MAX_TRACE */
4230 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4231 unsigned long size, int cpu)
4236 * If kernel or user changes the size of the ring buffer
4237 * we use the size that was given, and we can forget about
4238 * expanding it later.
4240 ring_buffer_expanded = true;
4242 /* May be called before buffers are initialized */
4243 if (!tr->trace_buffer.buffer)
4246 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4250 #ifdef CONFIG_TRACER_MAX_TRACE
4251 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4252 !tr->current_trace->use_max_tr)
4255 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4257 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4258 &tr->trace_buffer, cpu);
4261 * AARGH! We are left with different
4262 * size max buffer!!!!
4263 * The max buffer is our "snapshot" buffer.
4264 * When a tracer needs a snapshot (one of the
4265 * latency tracers), it swaps the max buffer
4266 * with the saved snapshot. We succeeded in updating
4267 * the size of the main buffer, but failed to
4268 * update the size of the max buffer. But when we tried
4269 * to reset the main buffer to the original size, we
4270 * failed there too. This is very unlikely to
4271 * happen, but if it does, warn and kill all
4275 tracing_disabled = 1;
4280 if (cpu == RING_BUFFER_ALL_CPUS)
4281 set_buffer_entries(&tr->max_buffer, size);
4283 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4286 #endif /* CONFIG_TRACER_MAX_TRACE */
4288 if (cpu == RING_BUFFER_ALL_CPUS)
4289 set_buffer_entries(&tr->trace_buffer, size);
4291 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4296 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4297 unsigned long size, int cpu_id)
4301 mutex_lock(&trace_types_lock);
4303 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4304 /* make sure, this cpu is enabled in the mask */
4305 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4311 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4316 mutex_unlock(&trace_types_lock);
4323 * tracing_update_buffers - used by tracing facility to expand ring buffers
4325 * To save memory when tracing is never used on a system that has it
4326 * configured in, the ring buffers are set to a minimum size. But once
4327 * a user starts to use the tracing facility, they need to grow
4328 * to their default size.
4330 * This function is to be called when a tracer is about to be used.
4332 int tracing_update_buffers(void)
4336 mutex_lock(&trace_types_lock);
4337 if (!ring_buffer_expanded)
4338 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4339 RING_BUFFER_ALL_CPUS);
4340 mutex_unlock(&trace_types_lock);
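/*
 * Example (a sketch): the buffers stay at their boot-time minimum until
 * something triggers the expansion above (e.g. setting current_tracer or
 * enabling an event), or until the size is set explicitly:
 *
 *     echo 4096 > /sys/kernel/tracing/buffer_size_kb      per-cpu size, in KB
 *     cat /sys/kernel/tracing/buffer_total_size_kb
 */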
4345 struct trace_option_dentry;
4348 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4351 * Used to clear out the tracer before deletion of an instance.
4352 * Must have trace_types_lock held.
4354 static void tracing_set_nop(struct trace_array *tr)
4356 if (tr->current_trace == &nop_trace)
4359 tr->current_trace->enabled--;
4361 if (tr->current_trace->reset)
4362 tr->current_trace->reset(tr);
4364 tr->current_trace = &nop_trace;
4367 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4369 /* Only enable if the directory has been created already. */
4373 create_trace_option_files(tr, t);
4376 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4379 #ifdef CONFIG_TRACER_MAX_TRACE
4384 mutex_lock(&trace_types_lock);
4386 if (!ring_buffer_expanded) {
4387 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4388 RING_BUFFER_ALL_CPUS);
4394 for (t = trace_types; t; t = t->next) {
4395 if (strcmp(t->name, buf) == 0)
4402 if (t == tr->current_trace)
4405 /* Some tracers are only allowed for the top level buffer */
4406 if (!trace_ok_for_array(t, tr)) {
4411 /* If trace pipe files are being read, we can't change the tracer */
4412 if (tr->current_trace->ref) {
4417 trace_branch_disable();
4419 tr->current_trace->enabled--;
4421 if (tr->current_trace->reset)
4422 tr->current_trace->reset(tr);
4424 /* Current trace needs to be nop_trace before synchronize_sched */
4425 tr->current_trace = &nop_trace;
4427 #ifdef CONFIG_TRACER_MAX_TRACE
4428 had_max_tr = tr->allocated_snapshot;
4430 if (had_max_tr && !t->use_max_tr) {
4432 * We need to make sure that the update_max_tr sees that
4433 * current_trace changed to nop_trace to keep it from
4434 * swapping the buffers after we resize it.
4435 * The update_max_tr is called with interrupts disabled,
4436 * so a synchronize_sched() is sufficient.
4438 synchronize_sched();
4443 #ifdef CONFIG_TRACER_MAX_TRACE
4444 if (t->use_max_tr && !had_max_tr) {
4445 ret = alloc_snapshot(tr);
4452 ret = tracer_init(t, tr);
4457 tr->current_trace = t;
4458 tr->current_trace->enabled++;
4459 trace_branch_enable(tr);
4461 mutex_unlock(&trace_types_lock);
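/*
 * Example (a sketch): switching tracers from user space goes through
 * tracing_set_tracer() above:
 *
 *     cat /sys/kernel/tracing/available_tracers
 *     echo function > /sys/kernel/tracing/current_tracer
 *     echo nop > /sys/kernel/tracing/current_tracer       back to no tracer
 *
 * The write is rejected while trace_pipe readers hold a reference.
 */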
4467 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4468 size_t cnt, loff_t *ppos)
4470 struct trace_array *tr = filp->private_data;
4471 char buf[MAX_TRACER_SIZE+1];
4478 if (cnt > MAX_TRACER_SIZE)
4479 cnt = MAX_TRACER_SIZE;
4481 if (copy_from_user(&buf, ubuf, cnt))
4486 /* strip ending whitespace. */
4487 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4490 err = tracing_set_tracer(tr, buf);
4500 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4501 size_t cnt, loff_t *ppos)
4506 r = snprintf(buf, sizeof(buf), "%ld\n",
4507 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4508 if (r > sizeof(buf))
4510 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4514 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4515 size_t cnt, loff_t *ppos)
4520 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4530 tracing_thresh_read(struct file *filp, char __user *ubuf,
4531 size_t cnt, loff_t *ppos)
4533 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4537 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4538 size_t cnt, loff_t *ppos)
4540 struct trace_array *tr = filp->private_data;
4543 mutex_lock(&trace_types_lock);
4544 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4548 if (tr->current_trace->update_thresh) {
4549 ret = tr->current_trace->update_thresh(tr);
4556 mutex_unlock(&trace_types_lock);
4561 #ifdef CONFIG_TRACER_MAX_TRACE
4564 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4565 size_t cnt, loff_t *ppos)
4567 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4571 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4572 size_t cnt, loff_t *ppos)
4574 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
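/*
 * Example (a sketch): both files take and report microseconds, converted
 * to and from nanoseconds by the helpers above.  tracing_thresh sets the
 * minimum latency the latency tracers will record:
 *
 *     echo 100 > /sys/kernel/tracing/tracing_thresh
 *     cat /sys/kernel/tracing/tracing_max_latency
 */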
4579 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4581 struct trace_array *tr = inode->i_private;
4582 struct trace_iterator *iter;
4585 if (tracing_disabled)
4588 if (trace_array_get(tr) < 0)
4591 mutex_lock(&trace_types_lock);
4593 /* create a buffer to store the information to pass to userspace */
4594 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4597 __trace_array_put(tr);
4601 trace_seq_init(&iter->seq);
4602 iter->trace = tr->current_trace;
4604 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4609 /* trace pipe does not show start of buffer */
4610 cpumask_setall(iter->started);
4612 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4613 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4615 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4616 if (trace_clocks[tr->clock_id].in_ns)
4617 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4620 iter->trace_buffer = &tr->trace_buffer;
4621 iter->cpu_file = tracing_get_cpu(inode);
4622 mutex_init(&iter->mutex);
4623 filp->private_data = iter;
4625 if (iter->trace->pipe_open)
4626 iter->trace->pipe_open(iter);
4628 nonseekable_open(inode, filp);
4630 tr->current_trace->ref++;
4632 mutex_unlock(&trace_types_lock);
4638 __trace_array_put(tr);
4639 mutex_unlock(&trace_types_lock);
4643 static int tracing_release_pipe(struct inode *inode, struct file *file)
4645 struct trace_iterator *iter = file->private_data;
4646 struct trace_array *tr = inode->i_private;
4648 mutex_lock(&trace_types_lock);
4650 tr->current_trace->ref--;
4652 if (iter->trace->pipe_close)
4653 iter->trace->pipe_close(iter);
4655 mutex_unlock(&trace_types_lock);
4657 free_cpumask_var(iter->started);
4658 mutex_destroy(&iter->mutex);
4661 trace_array_put(tr);
4667 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4669 struct trace_array *tr = iter->tr;
4671 /* Iterators are static, they should be filled or empty */
4672 if (trace_buffer_iter(iter, iter->cpu_file))
4673 return POLLIN | POLLRDNORM;
4675 if (tr->trace_flags & TRACE_ITER_BLOCK)
4677 * Always select as readable when in blocking mode
4679 return POLLIN | POLLRDNORM;
4681 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4686 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4688 struct trace_iterator *iter = filp->private_data;
4690 return trace_poll(iter, filp, poll_table);
4693 /* Must be called with iter->mutex held. */
4694 static int tracing_wait_pipe(struct file *filp)
4696 struct trace_iterator *iter = filp->private_data;
4699 while (trace_empty(iter)) {
4701 if ((filp->f_flags & O_NONBLOCK)) {
4706 * We block until we read something and tracing is disabled.
4707 * We still block if tracing is disabled, but we have never
4708 * read anything. This allows a user to cat this file, and
4709 * then enable tracing. But after we have read something,
4710 * we give an EOF when tracing is again disabled.
4712 * iter->pos will be 0 if we haven't read anything.
4714 if (!tracing_is_on() && iter->pos)
4717 mutex_unlock(&iter->mutex);
4719 ret = wait_on_pipe(iter, false);
4721 mutex_lock(&iter->mutex);
4734 tracing_read_pipe(struct file *filp, char __user *ubuf,
4735 size_t cnt, loff_t *ppos)
4737 struct trace_iterator *iter = filp->private_data;
4741 * Avoid more than one consumer on a single file descriptor.
4742 * This is just a matter of trace coherency; the ring buffer itself
4745 mutex_lock(&iter->mutex);
4747 /* return any leftover data */
4748 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4752 trace_seq_init(&iter->seq);
4754 if (iter->trace->read) {
4755 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4761 sret = tracing_wait_pipe(filp);
4765 /* stop when tracing is finished */
4766 if (trace_empty(iter)) {
4771 if (cnt >= PAGE_SIZE)
4772 cnt = PAGE_SIZE - 1;
4774 /* reset all but tr, trace, and overruns */
4775 memset(&iter->seq, 0,
4776 sizeof(struct trace_iterator) -
4777 offsetof(struct trace_iterator, seq));
4778 cpumask_clear(iter->started);
4781 trace_event_read_lock();
4782 trace_access_lock(iter->cpu_file);
4783 while (trace_find_next_entry_inc(iter) != NULL) {
4784 enum print_line_t ret;
4785 int save_len = iter->seq.seq.len;
4787 ret = print_trace_line(iter);
4788 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4789 /* don't print partial lines */
4790 iter->seq.seq.len = save_len;
4793 if (ret != TRACE_TYPE_NO_CONSUME)
4794 trace_consume(iter);
4796 if (trace_seq_used(&iter->seq) >= cnt)
4800 * Setting the full flag means we reached the trace_seq buffer
4801 * size and we should leave by partial output condition above.
4802 * One of the trace_seq_* functions is not used properly.
4804 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4807 trace_access_unlock(iter->cpu_file);
4808 trace_event_read_unlock();
4810 /* Now copy what we have to the user */
4811 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4812 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4813 trace_seq_init(&iter->seq);
4816 * If there was nothing to send to user, in spite of consuming trace
4817 * entries, go back to wait for more entries.
4823 mutex_unlock(&iter->mutex);
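/*
 * A minimal user-space sketch of a trace_pipe consumer (assumes tracefs is
 * mounted at /sys/kernel/tracing; error handling omitted for brevity).
 * Reads are consuming: entries returned here are removed from the buffer.
 *
 *     #include <fcntl.h>
 *     #include <unistd.h>
 *
 *     int main(void)
 *     {
 *             int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *             char buf[4096];
 *             ssize_t n;
 *
 *             while ((n = read(fd, buf, sizeof(buf))) > 0)
 *                     write(STDOUT_FILENO, buf, n);
 *             return close(fd);
 *     }
 */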
4828 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4831 __free_page(spd->pages[idx]);
4834 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4836 .confirm = generic_pipe_buf_confirm,
4837 .release = generic_pipe_buf_release,
4838 .steal = generic_pipe_buf_steal,
4839 .get = generic_pipe_buf_get,
4843 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4849 /* Seq buffer is page-sized, exactly what we need. */
4851 save_len = iter->seq.seq.len;
4852 ret = print_trace_line(iter);
4854 if (trace_seq_has_overflowed(&iter->seq)) {
4855 iter->seq.seq.len = save_len;
4860 * This should not be hit, because it should only
4861 * be set if the iter->seq overflowed. But check it
4862 * anyway to be safe.
4864 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4865 iter->seq.seq.len = save_len;
4869 count = trace_seq_used(&iter->seq) - save_len;
4872 iter->seq.seq.len = save_len;
4876 if (ret != TRACE_TYPE_NO_CONSUME)
4877 trace_consume(iter);
4879 if (!trace_find_next_entry_inc(iter)) {
4889 static ssize_t tracing_splice_read_pipe(struct file *filp,
4891 struct pipe_inode_info *pipe,
4895 struct page *pages_def[PIPE_DEF_BUFFERS];
4896 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4897 struct trace_iterator *iter = filp->private_data;
4898 struct splice_pipe_desc spd = {
4900 .partial = partial_def,
4901 .nr_pages = 0, /* This gets updated below. */
4902 .nr_pages_max = PIPE_DEF_BUFFERS,
4904 .ops = &tracing_pipe_buf_ops,
4905 .spd_release = tracing_spd_release_pipe,
4911 if (splice_grow_spd(pipe, &spd))
4914 mutex_lock(&iter->mutex);
4916 if (iter->trace->splice_read) {
4917 ret = iter->trace->splice_read(iter, filp,
4918 ppos, pipe, len, flags);
4923 ret = tracing_wait_pipe(filp);
4927 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4932 trace_event_read_lock();
4933 trace_access_lock(iter->cpu_file);
4935 /* Fill as many pages as possible. */
4936 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4937 spd.pages[i] = alloc_page(GFP_KERNEL);
4941 rem = tracing_fill_pipe_page(rem, iter);
4943 /* Copy the data into the page, so we can start over. */
4944 ret = trace_seq_to_buffer(&iter->seq,
4945 page_address(spd.pages[i]),
4946 trace_seq_used(&iter->seq));
4948 __free_page(spd.pages[i]);
4951 spd.partial[i].offset = 0;
4952 spd.partial[i].len = trace_seq_used(&iter->seq);
4954 trace_seq_init(&iter->seq);
4957 trace_access_unlock(iter->cpu_file);
4958 trace_event_read_unlock();
4959 mutex_unlock(&iter->mutex);
4964 ret = splice_to_pipe(pipe, &spd);
4968 splice_shrink_spd(&spd);
4972 mutex_unlock(&iter->mutex);
4977 tracing_entries_read(struct file *filp, char __user *ubuf,
4978 size_t cnt, loff_t *ppos)
4980 struct inode *inode = file_inode(filp);
4981 struct trace_array *tr = inode->i_private;
4982 int cpu = tracing_get_cpu(inode);
4987 mutex_lock(&trace_types_lock);
4989 if (cpu == RING_BUFFER_ALL_CPUS) {
4990 int cpu, buf_size_same;
4995 /* check if all cpu sizes are same */
4996 for_each_tracing_cpu(cpu) {
4997 /* fill in the size from first enabled cpu */
4999 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5000 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5006 if (buf_size_same) {
5007 if (!ring_buffer_expanded)
5008 r = sprintf(buf, "%lu (expanded: %lu)\n",
5010 trace_buf_size >> 10);
5012 r = sprintf(buf, "%lu\n", size >> 10);
5014 r = sprintf(buf, "X\n");
5016 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5018 mutex_unlock(&trace_types_lock);
5020 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5025 tracing_entries_write(struct file *filp, const char __user *ubuf,
5026 size_t cnt, loff_t *ppos)
5028 struct inode *inode = file_inode(filp);
5029 struct trace_array *tr = inode->i_private;
5033 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5037 /* must have at least 1 entry */
5041 /* value is in KB */
5043 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5053 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5054 size_t cnt, loff_t *ppos)
5056 struct trace_array *tr = filp->private_data;
5059 unsigned long size = 0, expanded_size = 0;
5061 mutex_lock(&trace_types_lock);
5062 for_each_tracing_cpu(cpu) {
5063 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5064 if (!ring_buffer_expanded)
5065 expanded_size += trace_buf_size >> 10;
5067 if (ring_buffer_expanded)
5068 r = sprintf(buf, "%lu\n", size);
5070 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5071 mutex_unlock(&trace_types_lock);
5073 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5077 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5078 size_t cnt, loff_t *ppos)
5081 * There is no need to read what the user has written; this function
5082 * just makes sure that there is no error when "echo" is used
5091 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5093 struct trace_array *tr = inode->i_private;
5095 /* disable tracing ? */
5096 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5097 tracer_tracing_off(tr);
5098 /* resize the ring buffer to 0 */
5099 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5101 trace_array_put(tr);
5107 tracing_mark_write(struct file *filp, const char __user *ubuf,
5108 size_t cnt, loff_t *fpos)
5110 unsigned long addr = (unsigned long)ubuf;
5111 struct trace_array *tr = filp->private_data;
5112 struct ring_buffer_event *event;
5113 struct ring_buffer *buffer;
5114 struct print_entry *entry;
5115 unsigned long irq_flags;
5116 struct page *pages[2];
5126 if (tracing_disabled)
5129 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5132 if (cnt > TRACE_BUF_SIZE)
5133 cnt = TRACE_BUF_SIZE;
5136 * Userspace is injecting traces into the kernel trace buffer.
5137 * We want to be as non-intrusive as possible.
5138 * To do so, we do not want to allocate any special buffers
5139 * or take any locks, but instead write the userspace data
5140 * straight into the ring buffer.
5142 * First we need to pin the userspace buffer into memory,
5143 * which most likely it already is, because userspace just referenced it,
5144 * but there's no guarantee that it is. By using get_user_pages_fast()
5145 * and kmap_atomic/kunmap_atomic() we can get access to the
5146 * pages directly. We then write the data directly into the
5149 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5151 /* check if we cross pages */
5152 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5155 offset = addr & (PAGE_SIZE - 1);
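/*
 * Worked example with illustrative numbers: assuming 4K pages, a write of
 * cnt = 64 bytes starting at addr = 0x1000ff0 ends at 0x1001030, so
 * (addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK) and two pages must be
 * pinned; the first PAGE_SIZE - offset = 16 bytes then come from page 0 at
 * offset 0xff0 and the remaining 48 bytes from the start of page 1, which
 * is exactly the split the two memcpy() calls below perform.
 */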
5158 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5159 if (ret < nr_pages) {
5161 put_page(pages[ret]);
5166 for (i = 0; i < nr_pages; i++)
5167 map_page[i] = kmap_atomic(pages[i]);
5169 local_save_flags(irq_flags);
5170 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5171 buffer = tr->trace_buffer.buffer;
5172 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5173 irq_flags, preempt_count());
5175 /* Ring buffer disabled, return as if not open for write */
5180 entry = ring_buffer_event_data(event);
5181 entry->ip = _THIS_IP_;
5183 if (nr_pages == 2) {
5184 len = PAGE_SIZE - offset;
5185 memcpy(&entry->buf, map_page[0] + offset, len);
5186 memcpy(&entry->buf[len], map_page[1], cnt - len);
5188 memcpy(&entry->buf, map_page[0] + offset, cnt);
5190 if (entry->buf[cnt - 1] != '\n') {
5191 entry->buf[cnt] = '\n';
5192 entry->buf[cnt + 1] = '\0';
5194 entry->buf[cnt] = '\0';
5196 __buffer_unlock_commit(buffer, event);
5203 for (i = nr_pages - 1; i >= 0; i--) {
5204 kunmap_atomic(map_page[i]);
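/*
 * Illustrative userspace sketch (not part of the kernel sources): writing
 * a marker through the "trace_marker" file handled by tracing_mark_write()
 * above.  The tracefs mount point is an assumption; older setups reach the
 * same file via debugfs under /sys/kernel/debug/tracing/.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int trace_mark(const char *msg)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *		int ret = -1;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, msg, strlen(msg)) >= 0)
 *			ret = 0;
 *		close(fd);
 *		return ret;
 *	}
 */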
5211 static int tracing_clock_show(struct seq_file *m, void *v)
5213 struct trace_array *tr = m->private;
5216 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5218 "%s%s%s%s", i ? " " : "",
5219 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5220 i == tr->clock_id ? "]" : "");
5226 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5230 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5231 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5234 if (i == ARRAY_SIZE(trace_clocks))
5237 mutex_lock(&trace_types_lock);
5241 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5244 * New clock may not be consistent with the previous clock.
5245 * Reset the buffer so that it doesn't have incomparable timestamps.
5247 tracing_reset_online_cpus(&tr->trace_buffer);
5249 #ifdef CONFIG_TRACER_MAX_TRACE
5250 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5251 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5252 tracing_reset_online_cpus(&tr->max_buffer);
5255 mutex_unlock(&trace_types_lock);
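/*
 * Writing one of the clock names listed by tracing_clock_show() (e.g.
 * "global") into the "trace_clock" file funnels into tracing_set_clock()
 * above via tracing_clock_write() below; as noted above, the buffers are
 * reset because timestamps taken with different clocks cannot be compared.
 */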
5260 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5261 size_t cnt, loff_t *fpos)
5263 struct seq_file *m = filp->private_data;
5264 struct trace_array *tr = m->private;
5266 const char *clockstr;
5269 if (cnt >= sizeof(buf))
5272 if (copy_from_user(&buf, ubuf, cnt))
5277 clockstr = strstrip(buf);
5279 ret = tracing_set_clock(tr, clockstr);
5288 static int tracing_clock_open(struct inode *inode, struct file *file)
5290 struct trace_array *tr = inode->i_private;
5293 if (tracing_disabled)
5296 if (trace_array_get(tr))
5299 ret = single_open(file, tracing_clock_show, inode->i_private);
5301 trace_array_put(tr);
5306 struct ftrace_buffer_info {
5307 struct trace_iterator iter;
5312 #ifdef CONFIG_TRACER_SNAPSHOT
5313 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5315 struct trace_array *tr = inode->i_private;
5316 struct trace_iterator *iter;
5320 if (trace_array_get(tr) < 0)
5323 if (file->f_mode & FMODE_READ) {
5324 iter = __tracing_open(inode, file, true);
5326 ret = PTR_ERR(iter);
5328 /* Writes still need the seq_file to hold the private data */
5330 m = kzalloc(sizeof(*m), GFP_KERNEL);
5333 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5341 iter->trace_buffer = &tr->max_buffer;
5342 iter->cpu_file = tracing_get_cpu(inode);
5344 file->private_data = m;
5348 trace_array_put(tr);
5354 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5357 struct seq_file *m = filp->private_data;
5358 struct trace_iterator *iter = m->private;
5359 struct trace_array *tr = iter->tr;
5363 ret = tracing_update_buffers();
5367 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5371 mutex_lock(&trace_types_lock);
5373 if (tr->current_trace->use_max_tr) {
5380 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5384 if (tr->allocated_snapshot)
5388 /* Only allow per-cpu swap if the ring buffer supports it */
5389 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5390 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5395 if (!tr->allocated_snapshot) {
5396 ret = alloc_snapshot(tr);
5400 local_irq_disable();
5401 /* Now, we're going to swap */
5402 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5403 update_max_tr(tr, current, smp_processor_id());
5405 update_max_tr_single(tr, current, iter->cpu_file);
5409 if (tr->allocated_snapshot) {
5410 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5411 tracing_reset_online_cpus(&tr->max_buffer);
5413 tracing_reset(&tr->max_buffer, iter->cpu_file);
5423 mutex_unlock(&trace_types_lock);
5427 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5429 struct seq_file *m = file->private_data;
5432 ret = tracing_release(inode, file);
5434 if (file->f_mode & FMODE_READ)
5437 /* If write only, the seq_file is just a stub */
5445 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5446 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5447 size_t count, loff_t *ppos);
5448 static int tracing_buffers_release(struct inode *inode, struct file *file);
5449 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5450 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5452 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5454 struct ftrace_buffer_info *info;
5457 ret = tracing_buffers_open(inode, filp);
5461 info = filp->private_data;
5463 if (info->iter.trace->use_max_tr) {
5464 tracing_buffers_release(inode, filp);
5468 info->iter.snapshot = true;
5469 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5474 #endif /* CONFIG_TRACER_SNAPSHOT */
5477 static const struct file_operations tracing_thresh_fops = {
5478 .open = tracing_open_generic,
5479 .read = tracing_thresh_read,
5480 .write = tracing_thresh_write,
5481 .llseek = generic_file_llseek,
5484 #ifdef CONFIG_TRACER_MAX_TRACE
5485 static const struct file_operations tracing_max_lat_fops = {
5486 .open = tracing_open_generic,
5487 .read = tracing_max_lat_read,
5488 .write = tracing_max_lat_write,
5489 .llseek = generic_file_llseek,
5493 static const struct file_operations set_tracer_fops = {
5494 .open = tracing_open_generic,
5495 .read = tracing_set_trace_read,
5496 .write = tracing_set_trace_write,
5497 .llseek = generic_file_llseek,
5500 static const struct file_operations tracing_pipe_fops = {
5501 .open = tracing_open_pipe,
5502 .poll = tracing_poll_pipe,
5503 .read = tracing_read_pipe,
5504 .splice_read = tracing_splice_read_pipe,
5505 .release = tracing_release_pipe,
5506 .llseek = no_llseek,
5509 static const struct file_operations tracing_entries_fops = {
5510 .open = tracing_open_generic_tr,
5511 .read = tracing_entries_read,
5512 .write = tracing_entries_write,
5513 .llseek = generic_file_llseek,
5514 .release = tracing_release_generic_tr,
5517 static const struct file_operations tracing_total_entries_fops = {
5518 .open = tracing_open_generic_tr,
5519 .read = tracing_total_entries_read,
5520 .llseek = generic_file_llseek,
5521 .release = tracing_release_generic_tr,
5524 static const struct file_operations tracing_free_buffer_fops = {
5525 .open = tracing_open_generic_tr,
5526 .write = tracing_free_buffer_write,
5527 .release = tracing_free_buffer_release,
5530 static const struct file_operations tracing_mark_fops = {
5531 .open = tracing_open_generic_tr,
5532 .write = tracing_mark_write,
5533 .llseek = generic_file_llseek,
5534 .release = tracing_release_generic_tr,
5537 static const struct file_operations trace_clock_fops = {
5538 .open = tracing_clock_open,
5540 .llseek = seq_lseek,
5541 .release = tracing_single_release_tr,
5542 .write = tracing_clock_write,
5545 #ifdef CONFIG_TRACER_SNAPSHOT
5546 static const struct file_operations snapshot_fops = {
5547 .open = tracing_snapshot_open,
5549 .write = tracing_snapshot_write,
5550 .llseek = tracing_lseek,
5551 .release = tracing_snapshot_release,
5554 static const struct file_operations snapshot_raw_fops = {
5555 .open = snapshot_raw_open,
5556 .read = tracing_buffers_read,
5557 .release = tracing_buffers_release,
5558 .splice_read = tracing_buffers_splice_read,
5559 .llseek = no_llseek,
5562 #endif /* CONFIG_TRACER_SNAPSHOT */
5564 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5566 struct trace_array *tr = inode->i_private;
5567 struct ftrace_buffer_info *info;
5570 if (tracing_disabled)
5573 if (trace_array_get(tr) < 0)
5576 info = kzalloc(sizeof(*info), GFP_KERNEL);
5578 trace_array_put(tr);
5582 mutex_lock(&trace_types_lock);
5585 info->iter.cpu_file = tracing_get_cpu(inode);
5586 info->iter.trace = tr->current_trace;
5587 info->iter.trace_buffer = &tr->trace_buffer;
5589 /* Force reading ring buffer for first read */
5590 info->read = (unsigned int)-1;
5592 filp->private_data = info;
5594 tr->current_trace->ref++;
5596 mutex_unlock(&trace_types_lock);
5598 ret = nonseekable_open(inode, filp);
5600 trace_array_put(tr);
5606 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5608 struct ftrace_buffer_info *info = filp->private_data;
5609 struct trace_iterator *iter = &info->iter;
5611 return trace_poll(iter, filp, poll_table);
5615 tracing_buffers_read(struct file *filp, char __user *ubuf,
5616 size_t count, loff_t *ppos)
5618 struct ftrace_buffer_info *info = filp->private_data;
5619 struct trace_iterator *iter = &info->iter;
5626 #ifdef CONFIG_TRACER_MAX_TRACE
5627 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5632 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5637 /* Do we have previous read data to read? */
5638 if (info->read < PAGE_SIZE)
5642 trace_access_lock(iter->cpu_file);
5643 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5647 trace_access_unlock(iter->cpu_file);
5650 if (trace_empty(iter)) {
5651 if ((filp->f_flags & O_NONBLOCK))
5654 ret = wait_on_pipe(iter, false);
5665 size = PAGE_SIZE - info->read;
5669 ret = copy_to_user(ubuf, info->spare + info->read, size);
5681 static int tracing_buffers_release(struct inode *inode, struct file *file)
5683 struct ftrace_buffer_info *info = file->private_data;
5684 struct trace_iterator *iter = &info->iter;
5686 mutex_lock(&trace_types_lock);
5688 iter->tr->current_trace->ref--;
5690 __trace_array_put(iter->tr);
5693 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5696 mutex_unlock(&trace_types_lock);
5702 struct ring_buffer *buffer;
5707 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5708 struct pipe_buffer *buf)
5710 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5715 ring_buffer_free_read_page(ref->buffer, ref->page);
5720 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5721 struct pipe_buffer *buf)
5723 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5728 /* Pipe buffer operations for a buffer. */
5729 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5731 .confirm = generic_pipe_buf_confirm,
5732 .release = buffer_pipe_buf_release,
5733 .steal = generic_pipe_buf_steal,
5734 .get = buffer_pipe_buf_get,
5738 * Callback from splice_to_pipe(), if we need to release some pages
5739 * at the end of the spd in case we errored out while filling the pipe.
5741 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5743 struct buffer_ref *ref =
5744 (struct buffer_ref *)spd->partial[i].private;
5749 ring_buffer_free_read_page(ref->buffer, ref->page);
5751 spd->partial[i].private = 0;
5755 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5756 struct pipe_inode_info *pipe, size_t len,
5759 struct ftrace_buffer_info *info = file->private_data;
5760 struct trace_iterator *iter = &info->iter;
5761 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5762 struct page *pages_def[PIPE_DEF_BUFFERS];
5763 struct splice_pipe_desc spd = {
5765 .partial = partial_def,
5766 .nr_pages_max = PIPE_DEF_BUFFERS,
5768 .ops = &buffer_pipe_buf_ops,
5769 .spd_release = buffer_spd_release,
5771 struct buffer_ref *ref;
5772 int entries, size, i;
5775 #ifdef CONFIG_TRACER_MAX_TRACE
5776 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5780 if (*ppos & (PAGE_SIZE - 1))
5783 if (len & (PAGE_SIZE - 1)) {
5784 if (len < PAGE_SIZE)
5789 if (splice_grow_spd(pipe, &spd))
5793 trace_access_lock(iter->cpu_file);
5794 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5796 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5800 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5807 ref->buffer = iter->trace_buffer->buffer;
5808 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5815 r = ring_buffer_read_page(ref->buffer, &ref->page,
5816 len, iter->cpu_file, 1);
5818 ring_buffer_free_read_page(ref->buffer, ref->page);
5824 * zero out any leftover data; this is going to
5827 size = ring_buffer_page_len(ref->page);
5828 if (size < PAGE_SIZE)
5829 memset(ref->page + size, 0, PAGE_SIZE - size);
5831 page = virt_to_page(ref->page);
5833 spd.pages[i] = page;
5834 spd.partial[i].len = PAGE_SIZE;
5835 spd.partial[i].offset = 0;
5836 spd.partial[i].private = (unsigned long)ref;
5840 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5843 trace_access_unlock(iter->cpu_file);
5846 /* did we read anything? */
5847 if (!spd.nr_pages) {
5852 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5855 ret = wait_on_pipe(iter, true);
5862 ret = splice_to_pipe(pipe, &spd);
5864 splice_shrink_spd(&spd);
5869 static const struct file_operations tracing_buffers_fops = {
5870 .open = tracing_buffers_open,
5871 .read = tracing_buffers_read,
5872 .poll = tracing_buffers_poll,
5873 .release = tracing_buffers_release,
5874 .splice_read = tracing_buffers_splice_read,
5875 .llseek = no_llseek,
5879 tracing_stats_read(struct file *filp, char __user *ubuf,
5880 size_t count, loff_t *ppos)
5882 struct inode *inode = file_inode(filp);
5883 struct trace_array *tr = inode->i_private;
5884 struct trace_buffer *trace_buf = &tr->trace_buffer;
5885 int cpu = tracing_get_cpu(inode);
5886 struct trace_seq *s;
5888 unsigned long long t;
5889 unsigned long usec_rem;
5891 s = kmalloc(sizeof(*s), GFP_KERNEL);
5897 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5898 trace_seq_printf(s, "entries: %ld\n", cnt);
5900 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5901 trace_seq_printf(s, "overrun: %ld\n", cnt);
5903 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5904 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5906 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5907 trace_seq_printf(s, "bytes: %ld\n", cnt);
5909 if (trace_clocks[tr->clock_id].in_ns) {
5910 /* local or global for trace_clock */
5911 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5912 usec_rem = do_div(t, USEC_PER_SEC);
5913 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5916 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5917 usec_rem = do_div(t, USEC_PER_SEC);
5918 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5920 /* counter or tsc mode for trace_clock */
5921 trace_seq_printf(s, "oldest event ts: %llu\n",
5922 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5924 trace_seq_printf(s, "now ts: %llu\n",
5925 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5928 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5929 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5931 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5932 trace_seq_printf(s, "read events: %ld\n", cnt);
5934 count = simple_read_from_buffer(ubuf, count, ppos,
5935 s->buffer, trace_seq_used(s));
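/*
 * Illustrative (made-up) contents of the per-cpu "stats" file assembled
 * above, for a clock that counts in nanoseconds:
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 7892
 *	oldest event ts:  2513.152105
 *	now ts:  2513.857960
 *	dropped events: 0
 *	read events: 129
 */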
5942 static const struct file_operations tracing_stats_fops = {
5943 .open = tracing_open_generic_tr,
5944 .read = tracing_stats_read,
5945 .llseek = generic_file_llseek,
5946 .release = tracing_release_generic_tr,
5949 #ifdef CONFIG_DYNAMIC_FTRACE
5951 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5957 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5958 size_t cnt, loff_t *ppos)
5960 static char ftrace_dyn_info_buffer[1024];
5961 static DEFINE_MUTEX(dyn_info_mutex);
5962 unsigned long *p = filp->private_data;
5963 char *buf = ftrace_dyn_info_buffer;
5964 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5967 mutex_lock(&dyn_info_mutex);
5968 r = sprintf(buf, "%ld ", *p);
5970 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5973 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5975 mutex_unlock(&dyn_info_mutex);
5980 static const struct file_operations tracing_dyn_info_fops = {
5981 .open = tracing_open_generic,
5982 .read = tracing_read_dyn_info,
5983 .llseek = generic_file_llseek,
5985 #endif /* CONFIG_DYNAMIC_FTRACE */
5987 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5989 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5995 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5997 unsigned long *count = (long *)data;
6009 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6010 struct ftrace_probe_ops *ops, void *data)
6012 long count = (long)data;
6014 seq_printf(m, "%ps:", (void *)ip);
6016 seq_puts(m, "snapshot");
6019 seq_puts(m, ":unlimited\n");
6021 seq_printf(m, ":count=%ld\n", count);
6026 static struct ftrace_probe_ops snapshot_probe_ops = {
6027 .func = ftrace_snapshot,
6028 .print = ftrace_snapshot_print,
6031 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6032 .func = ftrace_count_snapshot,
6033 .print = ftrace_snapshot_print,
6037 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6038 char *glob, char *cmd, char *param, int enable)
6040 struct ftrace_probe_ops *ops;
6041 void *count = (void *)-1;
6045 /* hash funcs only work with set_ftrace_filter */
6049 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6051 if (glob[0] == '!') {
6052 unregister_ftrace_function_probe_func(glob+1, ops);
6059 number = strsep(&param, ":");
6061 if (!strlen(number))
6065 * We use the callback data field (which is a pointer)
6068 ret = kstrtoul(number, 0, (unsigned long *)&count);
6073 ret = register_ftrace_function_probe(glob, ops, count);
6076 alloc_snapshot(&global_trace);
6078 return ret < 0 ? ret : 0;
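/*
 * This callback implements the "snapshot" command of set_ftrace_filter: a
 * filter entry of the form <function>:snapshot[:<count>] arms a probe that
 * takes a snapshot whenever <function> is hit (at most <count> times when
 * a count is given), and a leading '!' removes that probe again.
 */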
6081 static struct ftrace_func_command ftrace_snapshot_cmd = {
6083 .func = ftrace_trace_snapshot_callback,
6086 static __init int register_snapshot_cmd(void)
6088 return register_ftrace_command(&ftrace_snapshot_cmd);
6091 static inline __init int register_snapshot_cmd(void) { return 0; }
6092 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6094 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6096 if (WARN_ON(!tr->dir))
6097 return ERR_PTR(-ENODEV);
6099 /* Top directory uses NULL as the parent */
6100 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6103 /* All sub buffers have a descriptor */
6107 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6109 struct dentry *d_tracer;
6112 return tr->percpu_dir;
6114 d_tracer = tracing_get_dentry(tr);
6115 if (IS_ERR(d_tracer))
6118 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6120 WARN_ONCE(!tr->percpu_dir,
6121 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6123 return tr->percpu_dir;
6126 static struct dentry *
6127 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6128 void *data, long cpu, const struct file_operations *fops)
6130 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6132 if (ret) /* See tracing_get_cpu() */
6133 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6138 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6140 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6141 struct dentry *d_cpu;
6142 char cpu_dir[30]; /* 30 characters should be more than enough */
6147 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6148 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6150 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
6154 /* per cpu trace_pipe */
6155 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6156 tr, cpu, &tracing_pipe_fops);
6159 trace_create_cpu_file("trace", 0644, d_cpu,
6160 tr, cpu, &tracing_fops);
6162 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6163 tr, cpu, &tracing_buffers_fops);
6165 trace_create_cpu_file("stats", 0444, d_cpu,
6166 tr, cpu, &tracing_stats_fops);
6168 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6169 tr, cpu, &tracing_entries_fops);
6171 #ifdef CONFIG_TRACER_SNAPSHOT
6172 trace_create_cpu_file("snapshot", 0644, d_cpu,
6173 tr, cpu, &snapshot_fops);
6175 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6176 tr, cpu, &snapshot_raw_fops);
6180 #ifdef CONFIG_FTRACE_SELFTEST
6181 /* Let selftest have access to static functions in this file */
6182 #include "trace_selftest.c"
6186 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6189 struct trace_option_dentry *topt = filp->private_data;
6192 if (topt->flags->val & topt->opt->bit)
6197 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6201 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6204 struct trace_option_dentry *topt = filp->private_data;
6208 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6212 if (val != 0 && val != 1)
6215 if (!!(topt->flags->val & topt->opt->bit) != val) {
6216 mutex_lock(&trace_types_lock);
6217 ret = __set_tracer_option(topt->tr, topt->flags,
6219 mutex_unlock(&trace_types_lock);
6230 static const struct file_operations trace_options_fops = {
6231 .open = tracing_open_generic,
6232 .read = trace_options_read,
6233 .write = trace_options_write,
6234 .llseek = generic_file_llseek,
6238 * In order to pass in both the trace_array descriptor as well as the index
6239 * to the flag that the trace option file represents, the trace_array
6240 * has a character array of trace_flags_index[], which holds the index
6241 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6242 * The address of this character array is passed to the flag option file
6243 * read/write callbacks.
6245 * In order to extract both the index and the trace_array descriptor,
6246 * get_tr_index() uses the following algorithm.
6250 * As the pointer itself contains the address of the index (remember
6253 * Then, to get the trace_array descriptor, we subtract that index
6254 * from the ptr, which takes us to the start of the index array itself.
6256 * ptr - idx == &index[0]
6258 * Then a simple container_of() from that pointer gets us to the
6259 * trace_array descriptor.
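 *
 * An illustrative round trip, assuming a flag index of 3:
 *
 *	data        == &tr->trace_flags_index[3]
 *	*(u8 *)data == 3                              (the returned *pindex)
 *	data - 3    == &tr->trace_flags_index[0]
 *	container_of(data - 3, struct trace_array, trace_flags_index) == tr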
6261 static void get_tr_index(void *data, struct trace_array **ptr,
6262 unsigned int *pindex)
6264 *pindex = *(unsigned char *)data;
6266 *ptr = container_of(data - *pindex, struct trace_array,
6271 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6274 void *tr_index = filp->private_data;
6275 struct trace_array *tr;
6279 get_tr_index(tr_index, &tr, &index);
6281 if (tr->trace_flags & (1 << index))
6286 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6290 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6293 void *tr_index = filp->private_data;
6294 struct trace_array *tr;
6299 get_tr_index(tr_index, &tr, &index);
6301 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6305 if (val != 0 && val != 1)
6308 mutex_lock(&trace_types_lock);
6309 ret = set_tracer_flag(tr, 1 << index, val);
6310 mutex_unlock(&trace_types_lock);
6320 static const struct file_operations trace_options_core_fops = {
6321 .open = tracing_open_generic,
6322 .read = trace_options_core_read,
6323 .write = trace_options_core_write,
6324 .llseek = generic_file_llseek,
6327 struct dentry *trace_create_file(const char *name,
6329 struct dentry *parent,
6331 const struct file_operations *fops)
6335 ret = tracefs_create_file(name, mode, parent, data, fops);
6337 pr_warning("Could not create tracefs '%s' entry\n", name);
6343 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6345 struct dentry *d_tracer;
6350 d_tracer = tracing_get_dentry(tr);
6351 if (IS_ERR(d_tracer))
6354 tr->options = tracefs_create_dir("options", d_tracer);
6356 pr_warning("Could not create tracefs directory 'options'\n");
6364 create_trace_option_file(struct trace_array *tr,
6365 struct trace_option_dentry *topt,
6366 struct tracer_flags *flags,
6367 struct tracer_opt *opt)
6369 struct dentry *t_options;
6371 t_options = trace_options_init_dentry(tr);
6375 topt->flags = flags;
6379 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6380 &trace_options_fops);
6385 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6387 struct trace_option_dentry *topts;
6388 struct trace_options *tr_topts;
6389 struct tracer_flags *flags;
6390 struct tracer_opt *opts;
6397 flags = tracer->flags;
6399 if (!flags || !flags->opts)
6403 * If this is an instance, only create flags for tracers
6404 * the instance may have.
6406 if (!trace_ok_for_array(tracer, tr))
6409 for (i = 0; i < tr->nr_topts; i++) {
6411 * Check if these flags have already been added.
6412 * Some tracers share flags.
6414 if (tr->topts[i].tracer->flags == tracer->flags)
6420 for (cnt = 0; opts[cnt].name; cnt++)
6423 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6427 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6434 tr->topts = tr_topts;
6435 tr->topts[tr->nr_topts].tracer = tracer;
6436 tr->topts[tr->nr_topts].topts = topts;
6439 for (cnt = 0; opts[cnt].name; cnt++) {
6440 create_trace_option_file(tr, &topts[cnt], flags,
6442 WARN_ONCE(topts[cnt].entry == NULL,
6443 "Failed to create trace option: %s",
6448 static struct dentry *
6449 create_trace_option_core_file(struct trace_array *tr,
6450 const char *option, long index)
6452 struct dentry *t_options;
6454 t_options = trace_options_init_dentry(tr);
6458 return trace_create_file(option, 0644, t_options,
6459 (void *)&tr->trace_flags_index[index],
6460 &trace_options_core_fops);
6463 static void create_trace_options_dir(struct trace_array *tr)
6465 struct dentry *t_options;
6466 bool top_level = tr == &global_trace;
6469 t_options = trace_options_init_dentry(tr);
6473 for (i = 0; trace_options[i]; i++) {
6475 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6476 create_trace_option_core_file(tr, trace_options[i], i);
6481 rb_simple_read(struct file *filp, char __user *ubuf,
6482 size_t cnt, loff_t *ppos)
6484 struct trace_array *tr = filp->private_data;
6488 r = tracer_tracing_is_on(tr);
6489 r = sprintf(buf, "%d\n", r);
6491 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6495 rb_simple_write(struct file *filp, const char __user *ubuf,
6496 size_t cnt, loff_t *ppos)
6498 struct trace_array *tr = filp->private_data;
6499 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6503 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6508 mutex_lock(&trace_types_lock);
6510 tracer_tracing_on(tr);
6511 if (tr->current_trace->start)
6512 tr->current_trace->start(tr);
6514 tracer_tracing_off(tr);
6515 if (tr->current_trace->stop)
6516 tr->current_trace->stop(tr);
6518 mutex_unlock(&trace_types_lock);
6526 static const struct file_operations rb_simple_fops = {
6527 .open = tracing_open_generic_tr,
6528 .read = rb_simple_read,
6529 .write = rb_simple_write,
6530 .release = tracing_release_generic_tr,
6531 .llseek = default_llseek,
6534 struct dentry *trace_instance_dir;
6537 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6540 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6542 enum ring_buffer_flags rb_flags;
6544 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6548 buf->buffer = ring_buffer_alloc(size, rb_flags);
6552 buf->data = alloc_percpu(struct trace_array_cpu);
6554 ring_buffer_free(buf->buffer);
6558 /* Allocate the first page for all buffers */
6559 set_buffer_entries(&tr->trace_buffer,
6560 ring_buffer_size(tr->trace_buffer.buffer, 0));
6565 static int allocate_trace_buffers(struct trace_array *tr, int size)
6569 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6573 #ifdef CONFIG_TRACER_MAX_TRACE
6574 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6575 allocate_snapshot ? size : 1);
6577 ring_buffer_free(tr->trace_buffer.buffer);
6578 free_percpu(tr->trace_buffer.data);
6581 tr->allocated_snapshot = allocate_snapshot;
6584 * Only the top level trace array gets its snapshot allocated
6585 * from the kernel command line.
6587 allocate_snapshot = false;
6592 static void free_trace_buffer(struct trace_buffer *buf)
6595 ring_buffer_free(buf->buffer);
6597 free_percpu(buf->data);
6602 static void free_trace_buffers(struct trace_array *tr)
6607 free_trace_buffer(&tr->trace_buffer);
6609 #ifdef CONFIG_TRACER_MAX_TRACE
6610 free_trace_buffer(&tr->max_buffer);
6614 static void init_trace_flags_index(struct trace_array *tr)
6618 /* Used by the trace options files */
6619 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6620 tr->trace_flags_index[i] = i;
6623 static void __update_tracer_options(struct trace_array *tr)
6627 for (t = trace_types; t; t = t->next)
6628 add_tracer_options(tr, t);
6631 static void update_tracer_options(struct trace_array *tr)
6633 mutex_lock(&trace_types_lock);
6634 __update_tracer_options(tr);
6635 mutex_unlock(&trace_types_lock);
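/*
 * instance_mkdir() and instance_rmdir() below are the callbacks behind the
 * "instances" directory created in create_trace_instances(): making a
 * sub-directory there allocates a fresh trace_array with its own buffers
 * and control files, and removing it tears the instance down again,
 * provided nothing still holds a reference to it.
 */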
6638 static int instance_mkdir(const char *name)
6640 struct trace_array *tr;
6643 mutex_lock(&trace_types_lock);
6646 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6647 if (tr->name && strcmp(tr->name, name) == 0)
6652 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6656 tr->name = kstrdup(name, GFP_KERNEL);
6660 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6663 tr->trace_flags = global_trace.trace_flags;
6665 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6667 raw_spin_lock_init(&tr->start_lock);
6669 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6671 tr->current_trace = &nop_trace;
6673 INIT_LIST_HEAD(&tr->systems);
6674 INIT_LIST_HEAD(&tr->events);
6676 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6679 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6683 ret = event_trace_add_tracer(tr->dir, tr);
6685 tracefs_remove_recursive(tr->dir);
6689 init_tracer_tracefs(tr, tr->dir);
6690 init_trace_flags_index(tr);
6691 __update_tracer_options(tr);
6693 list_add(&tr->list, &ftrace_trace_arrays);
6695 mutex_unlock(&trace_types_lock);
6700 free_trace_buffers(tr);
6701 free_cpumask_var(tr->tracing_cpumask);
6706 mutex_unlock(&trace_types_lock);
6712 static int instance_rmdir(const char *name)
6714 struct trace_array *tr;
6719 mutex_lock(&trace_types_lock);
6722 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6723 if (tr->name && strcmp(tr->name, name) == 0) {
6732 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6735 list_del(&tr->list);
6737 tracing_set_nop(tr);
6738 event_trace_del_tracer(tr);
6739 ftrace_destroy_function_files(tr);
6740 tracefs_remove_recursive(tr->dir);
6741 free_trace_buffers(tr);
6743 for (i = 0; i < tr->nr_topts; i++) {
6744 kfree(tr->topts[i].topts);
6754 mutex_unlock(&trace_types_lock);
6759 static __init void create_trace_instances(struct dentry *d_tracer)
6761 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6764 if (WARN_ON(!trace_instance_dir))
6769 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6773 trace_create_file("available_tracers", 0444, d_tracer,
6774 tr, &show_traces_fops);
6776 trace_create_file("current_tracer", 0644, d_tracer,
6777 tr, &set_tracer_fops);
6779 trace_create_file("tracing_cpumask", 0644, d_tracer,
6780 tr, &tracing_cpumask_fops);
6782 trace_create_file("trace_options", 0644, d_tracer,
6783 tr, &tracing_iter_fops);
6785 trace_create_file("trace", 0644, d_tracer,
6788 trace_create_file("trace_pipe", 0444, d_tracer,
6789 tr, &tracing_pipe_fops);
6791 trace_create_file("buffer_size_kb", 0644, d_tracer,
6792 tr, &tracing_entries_fops);
6794 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6795 tr, &tracing_total_entries_fops);
6797 trace_create_file("free_buffer", 0200, d_tracer,
6798 tr, &tracing_free_buffer_fops);
6800 trace_create_file("trace_marker", 0220, d_tracer,
6801 tr, &tracing_mark_fops);
6803 trace_create_file("trace_clock", 0644, d_tracer, tr,
6806 trace_create_file("tracing_on", 0644, d_tracer,
6807 tr, &rb_simple_fops);
6809 create_trace_options_dir(tr);
6811 #ifdef CONFIG_TRACER_MAX_TRACE
6812 trace_create_file("tracing_max_latency", 0644, d_tracer,
6813 &tr->max_latency, &tracing_max_lat_fops);
6816 if (ftrace_create_function_files(tr, d_tracer))
6817 WARN(1, "Could not allocate function filter files");
6819 #ifdef CONFIG_TRACER_SNAPSHOT
6820 trace_create_file("snapshot", 0644, d_tracer,
6821 tr, &snapshot_fops);
6824 for_each_tracing_cpu(cpu)
6825 tracing_init_tracefs_percpu(tr, cpu);
6829 static struct vfsmount *trace_automount(void *ignore)
6831 struct vfsmount *mnt;
6832 struct file_system_type *type;
6835 * To maintain backward compatibility for tools that mount
6836 * debugfs to get to the tracing facility, tracefs is automatically
6837 * mounted to the debugfs/tracing directory.
6839 type = get_fs_type("tracefs");
6842 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6843 put_filesystem(type);
6852 * tracing_init_dentry - initialize top level trace array
6854 * This is called when creating files or directories in the tracing
6855 * directory. It is called via fs_initcall() by the boot-up code
6856 * and expects to return the dentry of the top level tracing directory.
6858 struct dentry *tracing_init_dentry(void)
6860 struct trace_array *tr = &global_trace;
6862 /* The top level trace array uses NULL as parent */
6866 if (WARN_ON(!tracefs_initialized()) ||
6867 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6868 WARN_ON(!debugfs_initialized())))
6869 return ERR_PTR(-ENODEV);
6872 * As there may still be users that expect the tracing
6873 * files to exist in debugfs/tracing, we must automount
6874 * the tracefs file system there, so older tools still
6875 * work with the newer kernel.
6877 tr->dir = debugfs_create_automount("tracing", NULL,
6878 trace_automount, NULL);
6880 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6881 return ERR_PTR(-ENOMEM);
6887 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6888 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6890 static void __init trace_enum_init(void)
6894 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6895 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6898 #ifdef CONFIG_MODULES
6899 static void trace_module_add_enums(struct module *mod)
6901 if (!mod->num_trace_enums)
6905 * Modules with bad taint do not have events created; do
6906 * not bother with enums either.
6908 if (trace_module_has_bad_taint(mod))
6911 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6914 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6915 static void trace_module_remove_enums(struct module *mod)
6917 union trace_enum_map_item *map;
6918 union trace_enum_map_item **last = &trace_enum_maps;
6920 if (!mod->num_trace_enums)
6923 mutex_lock(&trace_enum_mutex);
6925 map = trace_enum_maps;
6928 if (map->head.mod == mod)
6930 map = trace_enum_jmp_to_tail(map);
6931 last = &map->tail.next;
6932 map = map->tail.next;
6937 *last = trace_enum_jmp_to_tail(map)->tail.next;
6940 mutex_unlock(&trace_enum_mutex);
6943 static inline void trace_module_remove_enums(struct module *mod) { }
6944 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6946 static int trace_module_notify(struct notifier_block *self,
6947 unsigned long val, void *data)
6949 struct module *mod = data;
6952 case MODULE_STATE_COMING:
6953 trace_module_add_enums(mod);
6955 case MODULE_STATE_GOING:
6956 trace_module_remove_enums(mod);
6963 static struct notifier_block trace_module_nb = {
6964 .notifier_call = trace_module_notify,
6967 #endif /* CONFIG_MODULES */
6969 static __init int tracer_init_tracefs(void)
6971 struct dentry *d_tracer;
6973 trace_access_lock_init();
6975 d_tracer = tracing_init_dentry();
6976 if (IS_ERR(d_tracer))
6979 init_tracer_tracefs(&global_trace, d_tracer);
6981 trace_create_file("tracing_thresh", 0644, d_tracer,
6982 &global_trace, &tracing_thresh_fops);
6984 trace_create_file("README", 0444, d_tracer,
6985 NULL, &tracing_readme_fops);
6987 trace_create_file("saved_cmdlines", 0444, d_tracer,
6988 NULL, &tracing_saved_cmdlines_fops);
6990 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6991 NULL, &tracing_saved_cmdlines_size_fops);
6995 trace_create_enum_file(d_tracer);
6997 #ifdef CONFIG_MODULES
6998 register_module_notifier(&trace_module_nb);
7001 #ifdef CONFIG_DYNAMIC_FTRACE
7002 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7003 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7006 create_trace_instances(d_tracer);
7008 update_tracer_options(&global_trace);
7013 static int trace_panic_handler(struct notifier_block *this,
7014 unsigned long event, void *unused)
7016 if (ftrace_dump_on_oops)
7017 ftrace_dump(ftrace_dump_on_oops);
7021 static struct notifier_block trace_panic_notifier = {
7022 .notifier_call = trace_panic_handler,
7024 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7027 static int trace_die_handler(struct notifier_block *self,
7033 if (ftrace_dump_on_oops)
7034 ftrace_dump(ftrace_dump_on_oops);
7042 static struct notifier_block trace_die_notifier = {
7043 .notifier_call = trace_die_handler,
7048 * printk is set to a max of 1024; we really don't need it that big.
7049 * Nothing should be printing 1000 characters anyway.
7051 #define TRACE_MAX_PRINT 1000
7054 * Define here KERN_TRACE so that we have one place to modify
7055 * it if we decide to change what log level the ftrace dump
7058 #define KERN_TRACE KERN_EMERG
7061 trace_printk_seq(struct trace_seq *s)
7063 /* Probably should print a warning here. */
7064 if (s->seq.len >= TRACE_MAX_PRINT)
7065 s->seq.len = TRACE_MAX_PRINT;
7068 * More paranoid code. Although the buffer size is set to
7069 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7070 * an extra layer of protection.
7072 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7073 s->seq.len = s->seq.size - 1;
7075 /* should be NUL-terminated, but we are paranoid. */
7076 s->buffer[s->seq.len] = 0;
7078 printk(KERN_TRACE "%s", s->buffer);
7083 void trace_init_global_iter(struct trace_iterator *iter)
7085 iter->tr = &global_trace;
7086 iter->trace = iter->tr->current_trace;
7087 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7088 iter->trace_buffer = &global_trace.trace_buffer;
7090 if (iter->trace && iter->trace->open)
7091 iter->trace->open(iter);
7093 /* Annotate start of buffers if we had overruns */
7094 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7095 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7097 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7098 if (trace_clocks[iter->tr->clock_id].in_ns)
7099 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7102 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7104 /* use static because iter can be a bit big for the stack */
7105 static struct trace_iterator iter;
7106 static atomic_t dump_running;
7107 struct trace_array *tr = &global_trace;
7108 unsigned int old_userobj;
7109 unsigned long flags;
7112 /* Only allow one dump user at a time. */
7113 if (atomic_inc_return(&dump_running) != 1) {
7114 atomic_dec(&dump_running);
7119 * Always turn off tracing when we dump.
7120 * We don't need to show trace output of what happens
7121 * between multiple crashes.
7123 * If the user does a sysrq-z, then they can re-enable
7124 * tracing with echo 1 > tracing_on.
7128 local_irq_save(flags);
7130 /* Simulate the iterator */
7131 trace_init_global_iter(&iter);
7133 for_each_tracing_cpu(cpu) {
7134 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7137 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7139 /* don't look at user memory in panic mode */
7140 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7142 switch (oops_dump_mode) {
7144 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7147 iter.cpu_file = raw_smp_processor_id();
7152 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7153 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7156 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7158 /* Did function tracer already get disabled? */
7159 if (ftrace_is_dead()) {
7160 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7161 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7165 * We need to stop all tracing on all CPUs to read
7166 * the next buffer. This is a bit expensive, but is
7167 * not done often. We fill what we can read,
7168 * and then release the locks again.
7171 while (!trace_empty(&iter)) {
7174 printk(KERN_TRACE "---------------------------------\n");
7178 /* reset all but tr, trace, and overruns */
7179 memset(&iter.seq, 0,
7180 sizeof(struct trace_iterator) -
7181 offsetof(struct trace_iterator, seq));
7182 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7185 if (trace_find_next_entry_inc(&iter) != NULL) {
7188 ret = print_trace_line(&iter);
7189 if (ret != TRACE_TYPE_NO_CONSUME)
7190 trace_consume(&iter);
7192 touch_nmi_watchdog();
7194 trace_printk_seq(&iter.seq);
7198 printk(KERN_TRACE " (ftrace buffer empty)\n");
7200 printk(KERN_TRACE "---------------------------------\n");
7203 tr->trace_flags |= old_userobj;
7205 for_each_tracing_cpu(cpu) {
7206 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7208 atomic_dec(&dump_running);
7209 local_irq_restore(flags);
7211 EXPORT_SYMBOL_GPL(ftrace_dump);
7213 __init static int tracer_alloc_buffers(void)
7219 * Make sure we don't accidentally add more trace options
7220 * than we have bits for.
7222 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7224 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7227 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7228 goto out_free_buffer_mask;
7230 /* Only allocate trace_printk buffers if a trace_printk exists */
7231 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7232 /* Must be called before global_trace.buffer is allocated */
7233 trace_printk_init_buffers();
7235 /* To save memory, keep the ring buffer size to its minimum */
7236 if (ring_buffer_expanded)
7237 ring_buf_size = trace_buf_size;
7241 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7242 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7244 raw_spin_lock_init(&global_trace.start_lock);
7246 /* Used for event triggers */
7247 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7249 goto out_free_cpumask;
7251 if (trace_create_savedcmd() < 0)
7252 goto out_free_temp_buffer;
7254 /* TODO: make the number of buffers hot pluggable with CPUS */
7255 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7256 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7258 goto out_free_savedcmd;
7261 if (global_trace.buffer_disabled)
7264 if (trace_boot_clock) {
7265 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7267 pr_warning("Trace clock %s not defined, going back to default\n",
7272 * register_tracer() might reference current_trace, so it
7273 * needs to be set before we register anything. This is
7274 * just a bootstrap of current_trace anyway.
7276 global_trace.current_trace = &nop_trace;
7278 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7280 ftrace_init_global_array_ops(&global_trace);
7282 init_trace_flags_index(&global_trace);
7284 register_tracer(&nop_trace);
7286 /* All seems OK, enable tracing */
7287 tracing_disabled = 0;
7289 atomic_notifier_chain_register(&panic_notifier_list,
7290 &trace_panic_notifier);
7292 register_die_notifier(&trace_die_notifier);
7294 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7296 INIT_LIST_HEAD(&global_trace.systems);
7297 INIT_LIST_HEAD(&global_trace.events);
7298 list_add(&global_trace.list, &ftrace_trace_arrays);
7300 apply_trace_boot_options();
7302 register_snapshot_cmd();
7307 free_saved_cmdlines_buffer(savedcmd);
7308 out_free_temp_buffer:
7309 ring_buffer_free(temp_buffer);
7311 free_cpumask_var(global_trace.tracing_cpumask);
7312 out_free_buffer_mask:
7313 free_cpumask_var(tracing_buffer_mask);
7318 void __init trace_init(void)
7320 if (tracepoint_printk) {
7321 tracepoint_print_iter =
7322 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7323 if (WARN_ON(!tracepoint_print_iter))
7324 tracepoint_printk = 0;
7326 tracer_alloc_buffers();
7330 __init static int clear_boot_tracer(void)
7333 * The buffer holding the default bootup tracer name is in an init section.
7334 * This function is called as a late initcall. If we did not
7335 * find the boot tracer, then clear it out, to prevent
7336 * later registration from accessing the buffer that is
7337 * about to be freed.
7339 if (!default_bootup_tracer)
7342 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7343 default_bootup_tracer);
7344 default_bootup_tracer = NULL;
7349 fs_initcall(tracer_init_tracefs);
7350 late_initcall(clear_boot_tracer);