These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <trace/events/hist.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

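/*
 * tracing_cpu is set to 1 while that CPU is inside a critical section that
 * is being timed; max_trace_lock serializes updates of the recorded maximum.
 */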
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

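/*
 * trace_type holds which of the TRACER_*_OFF bits the active tracer selected
 * at init time; save_flags remembers the trace flags so that
 * irqsoff_tracer_reset() can restore them.
 */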
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

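/*
 * preempt_trace() and irq_trace() return nonzero when the corresponding
 * tracer type is selected and its condition (preemption disabled,
 * interrupts disabled) currently holds.
 */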
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
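/*
 * Switch between plain function output and function-graph output while the
 * tracer is loaded: stop the tracer, clear the per-cpu tracing state and the
 * recorded max latency, then restart with the new output mode.
 */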
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

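/*
 * Called when a critical section ends: compute how long irqs/preemption were
 * disabled and, if that beats the threshold or the current maximum (rechecked
 * under max_trace_lock), record the trace and the new maximum. The start
 * timestamp is re-armed for the next section in either case.
 */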
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

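/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * stamp the start time, remember the caller and set the per-cpu
 * tracing_cpu flag so the function tracers know to record.
 */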
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

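/*
 * Mark the end of the critical section on this CPU: clear tracing_cpu and
 * let check_critical_timing() decide whether the section that just ended
 * is a new maximum worth recording.
 */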
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude known stoppages (e.g. idle) from the measurement */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
        trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
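/*
 * With CONFIG_PROVE_LOCKING, lockdep provides trace_hardirqs_on/off() and
 * calls these hooks instead, so only the timing is done here.
 */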
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
        trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        trace_preemptirqsoff_hist(IRQS_ON, 0);
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
        trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        trace_preemptirqsoff_hist(IRQS_ON, 0);
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
        trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
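/*
 * Called when preemption is re-enabled/disabled. Only time the preempt-off
 * section here when the irqs-off tracer is not already covering it.
 */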
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        trace_preemptirqsoff_hist(PREEMPT_ON, 0);
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        trace_preemptirqsoff_hist(PREEMPT_ON, 1);
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

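/*
 * When the "function" trace option is in effect for this tracer, hook up
 * either the function-graph callbacks or the plain function callback in
 * tr->ops.
 */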
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

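/*
 * React to trace option changes while this tracer is active: toggle the
 * function tracer, switch the graph display, or fall back to the generic
 * overwrite handling.
 */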
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

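/*
 * start/stop_irqsoff_tracer() register or unregister the function hooks and
 * flip tracer_enabled, which gates the critical-timing paths above.
 */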
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

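/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff tracers:
 * force overwrite and latency format, reset the max latency and buffers,
 * set up the ftrace ops and start tracing. Only one of the three tracers
 * can be active at a time (irqsoff_busy).
 */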
static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(&tr->trace_buffer);

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

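/* Register whichever of the three tracers were configured in. */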
__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);