These changes are the raw update of the kernel sources to linux-4.4.6-rt14.
[kvmfornfv.git] kernel/kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

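/*
 * ULONG_MAX is used as a sentinel: it fills unused slots and marks the
 * end of the recorded trace when the arrays below are walked.
 */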
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
        .max_entries            = STACK_TRACE_ENTRIES - 1,
        .entries                = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == stack_trace_max.nr_entries ||
                                stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                                size, (void *)stack_dump_trace[i]);
        }
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;
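        /*
         * The stack grows down: the low bits of the stack address give
         * the offset within the THREAD_SIZE-aligned stack area, so the
         * depth in use is THREAD_SIZE minus that offset.
         */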
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= stack_trace_max_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        /* Can't do this from NMI context (can cause deadlocks) */
        if (in_nmi())
                return;

        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);

        /*
         * RCU may not be watching, make it see us.
         * The stack trace code uses rcu_sched.
         */
        rcu_irq_enter();

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= stack_trace_max_size)
                goto out;

        stack_trace_max_size = this_size;

        stack_trace_max.nr_entries = 0;
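        /* skip the stack tracer's own frames (the ftrace callback chain) */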
        stack_trace_max.skip = 3;

        save_stack_trace(&stack_trace_max);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Some archs may not have the passed in ip in the dump.
         * If that happens, we need to show everything.
         */
        if (i == stack_trace_max.nr_entries)
                i = 0;

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. Some entries may be missing
         * from the stack for whatever reason, so we may have to
         * account for that. If they are all there, this loop will
         * only happen once. This code only runs on a new max, so it
         * is far from a fast path.
         */
        while (i < stack_trace_max.nr_entries) {
                int found = 0;

                stack_trace_index[x] = this_size;
                p = start;

                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        if (*p == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        stack_trace_max_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                stack_trace_print();
                BUG();
        }

 out:
        rcu_irq_exit();
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed; this variable is only modified on this CPU */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

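        /*
         * ip is the address of the mcount call site in the traced
         * function; advance past it so it lines up with the return
         * address recorded by save_stack_trace().
         */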
        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * If we end up tracing inside arch_spin_lock() or after it
         * (e.g. from NMI), we would deadlock on the lock, so increment
         * the per-cpu trace_active here as well.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
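        /* *pos == 0 is the SEQ_START_TOKEN header, so entry n is *pos - 1 */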
        long n = *pos - 1;

        if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

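/*
 * t_start()/t_stop() bracket the whole seq_file walk: interrupts are
 * disabled, the per-cpu trace_active count is raised to keep the stack
 * tracer from recursing, and stack_trace_max_lock is held throughout.
 */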
static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&stack_trace_max_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &stack_trace_max_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                        NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);
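
For context, a minimal userspace sketch of how the knobs wired up above are typically exercised: it enables the tracer through the sysctl path quoted in print_disabled() and then reads back stack_max_size, the file created by stack_trace_init(). The mount point /sys/kernel/debug/tracing is an assumption of this sketch, not something this file controls.

/* Hypothetical userspace helper; only the sysctl path is taken from this file. */
#include <stdio.h>

int main(void)
{
        FILE *f;
        char buf[64];

        /* Same effect as: echo 1 > /proc/sys/kernel/stack_tracer_enabled */
        f = fopen("/proc/sys/kernel/stack_tracer_enabled", "w");
        if (!f) {
                perror("stack_tracer_enabled");
                return 1;
        }
        fputs("1\n", f);
        fclose(f);

        /* stack_max_size is created by stack_trace_init(); the tracing
         * directory location below is the usual one, not guaranteed. */
        f = fopen("/sys/kernel/debug/tracing/stack_max_size", "r");
        if (!f) {
                perror("stack_max_size");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("max stack usage (bytes): %s", buf);
        fclose(f);
        return 0;
}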