/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

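/*
 * Usage sketch (assuming tracefs is mounted at the conventional
 * /sys/kernel/debug/tracing; the mount point may differ):
 *
 *   # cd /sys/kernel/debug/tracing
 *   # echo function_graph > current_tracer
 *   # echo 1 > options/funcgraph-proc    # add the TASK/PID column
 *   # echo 0 > options/funcgraph-irqs    # hide functions run in irq context
 *
 * Each write toggles the corresponding TRACE_GRAPH_* bit in
 * tracer_flags.val above; func_graph_set_flag() below is then called
 * for the bits that need extra handling.
 */
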
static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack
         * of the current task.  Its value should be in
         * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
         * used.  To support filtering out specific functions, the index
         * is made negative by subtracting a huge value
         * (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative index
         * it will ignore the record.  The index is recovered when
         * returning from the filtered function by adding
         * FTRACE_NOTRACE_DEPTH back, after which functions are recorded
         * normally again.
         *
         * The curr_ret_stack is initialized to -1 and gets increased
         * in this function.  So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = current->curr_ret_stack;

        return 0;
}

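/*
 * Worked example of the notrace offset above (a sketch; the actual
 * constants live in <linux/ftrace.h>): suppose curr_ret_stack was 3
 * and the entered function matches set_graph_notrace.  Its entry is
 * stored at index 4, but curr_ret_stack is left at
 * 4 - FTRACE_NOTRACE_DEPTH, a negative value that makes the tracer
 * skip the function.  On return, ftrace_pop_return_trace() adds
 * FTRACE_NOTRACE_DEPTH back, recovering index 4 and with it the saved
 * return address.
 */
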
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function.  Recover the index to get the original
         * return address.  See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the placeholder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

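/*
 * Note: ftrace_return_to_handler() is invoked from the architecture's
 * return trampoline (typically named return_to_handler in the arch
 * code), which the arch entry code installed in place of the real
 * return address; the address returned here is where that trampoline
 * finally jumps back to.
 */
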
int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry   = ring_buffer_event_data(event);
        entry->graph_ent                        = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* Trace it when it is nested in an enabled function or is itself enabled. */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

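/*
 * Note the pattern above: atomic_inc_return(&data->disabled) == 1 acts
 * as a per-cpu re-entrancy guard.  If this cpu is already inside the
 * tracer (counter > 1), the nested event is dropped rather than
 * recursing into the ring buffer.
 */
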
static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long parent_ip,
                unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ret                              = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}

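/*
 * Example of the field produced above (illustrative values): for
 * comm "sshd" and pid 1755 the 14-character column comes out centered
 * as "  sshd-1755   ", comm having first been truncated to at most
 * 7 characters.
 */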

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

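/*
 * In short: an entry event immediately followed by its own return
 * event (same pid, same function) is a leaf, and print_graph_entry()
 * can render it on one line as "func();" with its duration instead of
 * an open/close brace pair.
 */
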
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

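/*
 * Example (illustrative): t = 5123456789 ns is printed as
 * "    5.123456 |  " -- seconds, a dot, and the remainder rounded
 * down to microseconds.
 */
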
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}

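/*
 * Example (illustrative): duration = 1234567 ns.  do_div() above
 * leaves duration = 1234 us and nsecs_rem = 567, so the column reads
 * "1234.567 us " -- usecs, up to three fractional digits, then padded
 * to the fixed width.
 */
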
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an execution-time overhead to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry,
                struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the return entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(trace->depth < 0)) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * so it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    "                                  /* 4 spaces */
                "                 ";                    /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}

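/*
 * With the default flags (CPU and DURATION on, abstime/proc/latency
 * off) the two header lines built above come out roughly like this
 * (illustrative, spacing approximate):
 *
 *   # CPU  DURATION                  FUNCTION CALLS
 *   # |     |   |                     |   |   |   |
 */
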
static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

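/*
 * Enabling the tracer registered above is done through tracefs (a
 * sketch, assuming the usual mount point):
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 */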

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};

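/*
 * Usage sketch for the file created below (assuming the usual tracefs
 * mount point): limit tracing to three levels of nesting, or write 0
 * to remove the limit:
 *
 *   # echo 3 > /sys/kernel/debug/tracing/max_graph_depth
 *   # cat /sys/kernel/debug/tracing/max_graph_depth
 *   3
 */
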
static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);