/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <trace/events/hist.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

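/*
 * tracing_cpu is set to 1 on the CPU that is currently timing a
 * critical section and cleared again when that section ends.
 * max_trace_lock serializes the "is this still the maximum?" check
 * and the max-latency snapshot in check_critical_timing().
 */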
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

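/*
 * Which events the currently registered tracer times: irqs off,
 * preemption off, or both.  Set by the tracer ->init() callbacks below.
 */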
static int trace_type __read_mostly;

static int save_flags;
static bool function_enabled;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH     1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* display latency trace as call graph */
        { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val  = 0,
        .opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
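/*
 * Handle toggling of the display-graph option.  Switching between the
 * function and function-graph callbacks requires stopping and
 * restarting the tracer, so the per-cpu state and the recorded maximum
 * latency are reset here as well.
 */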
static int
irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        int cpu;

        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -EINVAL;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

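/*
 * Graph tracer callbacks: record call entries/returns only while this
 * CPU is timing a critical section (gated by func_prolog_dec()).
 */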
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

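/*
 * Emit a function entry either as a graph event or as a plain TRACE_FN
 * event, depending on the display-graph option.  Without
 * CONFIG_FUNCTION_GRAPH_TRACER this is simply aliased to
 * trace_function() below.
 */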
static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int
irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tr->max_latency)
                        return 0;
        }
        return 1;
}

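/*
 * Called when a critical section ends.  Compute the time since
 * preempt_timestamp and, if report_latency() says it is a new maximum
 * (or above tracing_thresh), record the closing function/stack and
 * snapshot the trace via update_max_tr_single().  max_trace_lock
 * prevents two CPUs from updating the maximum at the same time.
 */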
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

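/*
 * Arm the per-cpu timing state when irqs or preemption get disabled:
 * remember the timestamp and the address that disabled them, and mark
 * this CPU as tracing via tracing_cpu.
 */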
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

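/*
 * Disarm when irqs or preemption are re-enabled: clear tracing_cpu,
 * record the closing event and let check_critical_timing() decide
 * whether this section is a new maximum.
 */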
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings: used to exclude sections such as the idle loop from the measurement */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
        trace_preemptirqsoff_hist(TRACE_START, 1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        trace_preemptirqsoff_hist(TRACE_STOP, 0);
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        trace_preemptirqsoff_hist(IRQS_ON, 0);
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
        trace_preemptirqsoff_hist(IRQS_OFF, 1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        trace_preemptirqsoff_hist(IRQS_ON, 0);
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
        trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        trace_preemptirqsoff_hist(IRQS_ON, 0);
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
        trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
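/*
 * Called when preemption is enabled/disabled.  Note the !irq_trace()
 * test: with the combined preemptirqsoff tracer, preempt-off sections
 * are only timed while interrupts are still enabled, so the same
 * section is not accounted by both the irq and the preempt paths.
 */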
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        trace_preemptirqsoff_hist(PREEMPT_ON, 0);
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        trace_preemptirqsoff_hist(PREEMPT_ON, 1);
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

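/*
 * Install or remove the function (or function-graph) callback that
 * fills the trace with function entries while a section is being
 * timed.  Whether it is installed at all is controlled by the
 * 'function' trace option (TRACE_ITER_FUNCTION).
 */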
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static void irqsoff_function_set(struct trace_array *tr, int set)
{
        if (set)
                register_irqsoff_function(tr, is_graph(), 1);
        else
                unregister_irqsoff_function(tr, is_graph());
}

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (mask & TRACE_ITER_FUNCTION)
                irqsoff_function_set(tr, set);

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

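/* Only one tracer of the irqsoff family can be active at a time. */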
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(&tr->trace_buffer);

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph())))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph());

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

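/*
 * Typical usage from user space, assuming tracefs/debugfs is mounted
 * at /sys/kernel/debug/tracing (these are the standard ftrace control
 * files, not anything specific to this file):
 *
 *   echo 0 > options/function-trace      # optional: lower overhead
 *   echo irqsoff > current_tracer        # or preemptoff / preemptirqsoff
 *   echo 1 > tracing_on
 *   ... run the workload ...
 *   echo 0 > tracing_on
 *   cat tracing_max_latency              # worst-case latency, in usecs
 *   cat trace                            # trace of that worst-case section
 */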
__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);