/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <trace/events/hist.h>

#include "trace.h"
static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);
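/*
 * The per-CPU tracing_cpu flag above marks the CPU that is currently
 * inside a traced critical section; max_trace_lock serializes updates
 * of the saved maximum-latency trace between CPUs.
 */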
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif
#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);
	return 0;
}
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
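/*
 * The graph entry/return callbacks reuse func_prolog_dec(), so graph
 * events are recorded only on a CPU that is currently inside an
 * irqs/preempt-off critical section.
 */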
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret, pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	return ret;
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}
static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
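/* Output flags handed to the function-graph printer for this tracer. */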
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
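/*
 * __trace_function() picks the graph or the plain function event at
 * run time, so the critical-timing code below does not need to know
 * which output mode is active.
 */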
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function
#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
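/*
 * A critical section has just ended: compute its length and, if it
 * qualifies as a new maximum, snapshot the trace under
 * max_trace_lock. The max_sequence check detects a concurrent
 * maximum on another CPU that would invalidate this measurement.
 */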
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
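/*
 * start_critical_timing() arms a measurement on this CPU: it stamps
 * the start time, records the start address, and sets tracing_cpu so
 * the function-trace callbacks know a critical section is open.
 */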
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/* start and stop critical timings used for stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
	trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
	trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
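/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_*()
 * entry points and forwards to time_hardirqs_on/off() here; without
 * it, this file provides the trace_hardirqs_*() hooks directly.
 */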
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
	trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
}
#else /* !CONFIG_PROVE_LOCKING */

/* Stubs: */
void trace_softirqs_on(unsigned long ip) { }
void trace_softirqs_off(unsigned long ip) { }
inline void print_irqtrace_events(struct task_struct *curr) { }
/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	trace_preemptirqsoff_hist(IRQS_ON, 0);
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
	trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	trace_preemptirqsoff_hist(IRQS_ON, 0);
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
	trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace_preemptirqsoff_hist(PREEMPT_ON, 0);
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace_preemptirqsoff_hist(PREEMPT_ON, 1);
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
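/*
 * register_irqsoff_function() attaches either the graph callbacks or
 * the plain function callback, honoring the instance's function-trace
 * and display-graph flags.
 */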
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}
static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}
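/* Only one of the latency tracers may own this machinery at a time. */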
static bool irqsoff_busy;
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(&tr->trace_buffer);

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}
static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
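/*
 * The preemptoff and preemptirqsoff tracers below reuse the whole
 * irqsoff machinery; only trace_type, the name and the selftest
 * differ.
 */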
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);