Add RT Linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / kernel / trace / trace_events.c
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10
11 #define pr_fmt(fmt) fmt
12
13 #include <linux/workqueue.h>
14 #include <linux/spinlock.h>
15 #include <linux/kthread.h>
16 #include <linux/tracefs.h>
17 #include <linux/uaccess.h>
18 #include <linux/module.h>
19 #include <linux/ctype.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22
23 #include <asm/setup.h>
24
25 #include "trace_output.h"
26
27 #undef TRACE_SYSTEM
28 #define TRACE_SYSTEM "TRACE_SYSTEM"
29
30 DEFINE_MUTEX(event_mutex);
31
32 LIST_HEAD(ftrace_events);
33 static LIST_HEAD(ftrace_common_fields);
34
35 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
36
37 static struct kmem_cache *field_cachep;
38 static struct kmem_cache *file_cachep;
39
40 #define SYSTEM_FL_FREE_NAME             (1 << 31)
41
42 static inline int system_refcount(struct event_subsystem *system)
43 {
44         return system->ref_count & ~SYSTEM_FL_FREE_NAME;
45 }
46
47 static int system_refcount_inc(struct event_subsystem *system)
48 {
49         return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
50 }
51
52 static int system_refcount_dec(struct event_subsystem *system)
53 {
54         return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
55 }
56
57 /* Double loops, do not use break, only goto's work */
58 #define do_for_each_event_file(tr, file)                        \
59         list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
60                 list_for_each_entry(file, &tr->events, list)
61
62 #define do_for_each_event_file_safe(tr, file)                   \
63         list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
64                 struct ftrace_event_file *___n;                         \
65                 list_for_each_entry_safe(file, ___n, &tr->events, list)
66
67 #define while_for_each_event_file()             \
68         }
69
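/*
 * A minimal usage sketch of the helpers above (the caller and the test
 * are hypothetical). The pair expands to two nested list_for_each_entry()
 * loops, so a plain 'break' only leaves the inner per-trace_array loop;
 * leaving both loops needs a goto:
 *
 *	struct trace_array *tr;
 *	struct ftrace_event_file *file;
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call == target_call)	// hypothetical test
 *			goto found;
 *	} while_for_each_event_file();
 * found:
 *	...
 */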
70 static struct list_head *
71 trace_get_fields(struct ftrace_event_call *event_call)
72 {
73         if (!event_call->class->get_fields)
74                 return &event_call->class->fields;
75         return event_call->class->get_fields(event_call);
76 }
77
78 static struct ftrace_event_field *
79 __find_event_field(struct list_head *head, char *name)
80 {
81         struct ftrace_event_field *field;
82
83         list_for_each_entry(field, head, link) {
84                 if (!strcmp(field->name, name))
85                         return field;
86         }
87
88         return NULL;
89 }
90
91 struct ftrace_event_field *
92 trace_find_event_field(struct ftrace_event_call *call, char *name)
93 {
94         struct ftrace_event_field *field;
95         struct list_head *head;
96
97         field = __find_event_field(&ftrace_common_fields, name);
98         if (field)
99                 return field;
100
101         head = trace_get_fields(call);
102         return __find_event_field(head, name);
103 }
104
105 static int __trace_define_field(struct list_head *head, const char *type,
106                                 const char *name, int offset, int size,
107                                 int is_signed, int filter_type)
108 {
109         struct ftrace_event_field *field;
110
111         field = kmem_cache_alloc(field_cachep, GFP_TRACE);
112         if (!field)
113                 return -ENOMEM;
114
115         field->name = name;
116         field->type = type;
117
118         if (filter_type == FILTER_OTHER)
119                 field->filter_type = filter_assign_type(type);
120         else
121                 field->filter_type = filter_type;
122
123         field->offset = offset;
124         field->size = size;
125         field->is_signed = is_signed;
126
127         list_add(&field->link, head);
128
129         return 0;
130 }
131
132 int trace_define_field(struct ftrace_event_call *call, const char *type,
133                        const char *name, int offset, int size, int is_signed,
134                        int filter_type)
135 {
136         struct list_head *head;
137
138         if (WARN_ON(!call->class))
139                 return 0;
140
141         head = trace_get_fields(call);
142         return __trace_define_field(head, type, name, offset, size,
143                                     is_signed, filter_type);
144 }
145 EXPORT_SYMBOL_GPL(trace_define_field);
146
147 #define __common_field(type, item)                                      \
148         ret = __trace_define_field(&ftrace_common_fields, #type,        \
149                                    "common_" #item,                     \
150                                    offsetof(typeof(ent), item),         \
151                                    sizeof(ent.item),                    \
152                                    is_signed_type(type), FILTER_OTHER); \
153         if (ret)                                                        \
154                 return ret;
155
156 static int trace_define_common_fields(void)
157 {
158         int ret;
159         struct trace_entry ent;
160
161         __common_field(unsigned short, type);
162         __common_field(unsigned char, flags);
163         __common_field(unsigned char, preempt_count);
164         __common_field(int, pid);
165         __common_field(unsigned short, migrate_disable);
166         __common_field(unsigned short, padding);
167
168         return ret;
169 }
170
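/*
 * With the fields registered above, every event's "format" file starts
 * with the same common header; a sketch of how it renders (offset and
 * size values depend on the struct trace_entry layout of the build; the
 * last two fields come from the -rt migrate-disable accounting):
 *
 *	field:unsigned short common_type;		offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;		offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;				offset:4;	size:4;	signed:1;
 *	field:unsigned short common_migrate_disable;	offset:8;	size:2;	signed:0;
 *	field:unsigned short common_padding;		offset:10;	size:2;	signed:0;
 */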
171 static void trace_destroy_fields(struct ftrace_event_call *call)
172 {
173         struct ftrace_event_field *field, *next;
174         struct list_head *head;
175
176         head = trace_get_fields(call);
177         list_for_each_entry_safe(field, next, head, link) {
178                 list_del(&field->link);
179                 kmem_cache_free(field_cachep, field);
180         }
181 }
182
183 int trace_event_raw_init(struct ftrace_event_call *call)
184 {
185         int id;
186
187         id = register_ftrace_event(&call->event);
188         if (!id)
189                 return -ENODEV;
190
191         return 0;
192 }
193 EXPORT_SYMBOL_GPL(trace_event_raw_init);
194
195 void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
196                                   struct ftrace_event_file *ftrace_file,
197                                   unsigned long len)
198 {
199         struct ftrace_event_call *event_call = ftrace_file->event_call;
200
201         local_save_flags(fbuffer->flags);
202         fbuffer->pc = preempt_count();
203         fbuffer->ftrace_file = ftrace_file;
204
205         fbuffer->event =
206                 trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
207                                                 event_call->event.type, len,
208                                                 fbuffer->flags, fbuffer->pc);
209         if (!fbuffer->event)
210                 return NULL;
211
212         fbuffer->entry = ring_buffer_event_data(fbuffer->event);
213         return fbuffer->entry;
214 }
215 EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
216
217 static DEFINE_SPINLOCK(tracepoint_iter_lock);
218
219 static void output_printk(struct ftrace_event_buffer *fbuffer)
220 {
221         struct ftrace_event_call *event_call;
222         struct trace_event *event;
223         unsigned long flags;
224         struct trace_iterator *iter = tracepoint_print_iter;
225
226         if (!iter)
227                 return;
228
229         event_call = fbuffer->ftrace_file->event_call;
230         if (!event_call || !event_call->event.funcs ||
231             !event_call->event.funcs->trace)
232                 return;
233
234         event = &fbuffer->ftrace_file->event_call->event;
235
236         spin_lock_irqsave(&tracepoint_iter_lock, flags);
237         trace_seq_init(&iter->seq);
238         iter->ent = fbuffer->entry;
239         event_call->event.funcs->trace(iter, 0, event);
240         trace_seq_putc(&iter->seq, 0);
241         printk("%s", iter->seq.buffer);
242
243         spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
244 }
245
246 void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
247 {
248         if (tracepoint_printk)
249                 output_printk(fbuffer);
250
251         event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
252                                     fbuffer->event, fbuffer->entry,
253                                     fbuffer->flags, fbuffer->pc);
254 }
255 EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
256
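/*
 * A hedged sketch of how the reserve/commit pair above is used by a
 * tracepoint probe (the TRACE_EVENT() macros generate code of this
 * shape; the entry type and field below are illustrative only):
 *
 *	struct ftrace_event_buffer fbuffer;
 *	struct my_entry {			// hypothetical entry layout
 *		struct trace_entry	ent;
 *		unsigned long		value;
 *	} *entry;
 *
 *	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
 *					    sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;			// fill event-specific fields
 *	ftrace_event_buffer_commit(&fbuffer);
 */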
257 int ftrace_event_reg(struct ftrace_event_call *call,
258                      enum trace_reg type, void *data)
259 {
260         struct ftrace_event_file *file = data;
261
262         WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
263         switch (type) {
264         case TRACE_REG_REGISTER:
265                 return tracepoint_probe_register(call->tp,
266                                                  call->class->probe,
267                                                  file);
268         case TRACE_REG_UNREGISTER:
269                 tracepoint_probe_unregister(call->tp,
270                                             call->class->probe,
271                                             file);
272                 return 0;
273
274 #ifdef CONFIG_PERF_EVENTS
275         case TRACE_REG_PERF_REGISTER:
276                 return tracepoint_probe_register(call->tp,
277                                                  call->class->perf_probe,
278                                                  call);
279         case TRACE_REG_PERF_UNREGISTER:
280                 tracepoint_probe_unregister(call->tp,
281                                             call->class->perf_probe,
282                                             call);
283                 return 0;
284         case TRACE_REG_PERF_OPEN:
285         case TRACE_REG_PERF_CLOSE:
286         case TRACE_REG_PERF_ADD:
287         case TRACE_REG_PERF_DEL:
288                 return 0;
289 #endif
290         }
291         return 0;
292 }
293 EXPORT_SYMBOL_GPL(ftrace_event_reg);
294
295 void trace_event_enable_cmd_record(bool enable)
296 {
297         struct ftrace_event_file *file;
298         struct trace_array *tr;
299
300         mutex_lock(&event_mutex);
301         do_for_each_event_file(tr, file) {
302
303                 if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
304                         continue;
305
306                 if (enable) {
307                         tracing_start_cmdline_record();
308                         set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
309                 } else {
310                         tracing_stop_cmdline_record();
311                         clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
312                 }
313         } while_for_each_event_file();
314         mutex_unlock(&event_mutex);
315 }
316
317 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
318                                          int enable, int soft_disable)
319 {
320         struct ftrace_event_call *call = file->event_call;
321         int ret = 0;
322         int disable;
323
324         switch (enable) {
325         case 0:
326                 /*
327                  * When soft_disable is set and enable is cleared, the sm_ref
328                  * reference counter is decremented. If it reaches 0, we want
329                  * to clear the SOFT_DISABLED flag but leave the event in the
330                  * state that it was. That is, if the event was enabled and
331                  * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
332                  * is set we do not want the event to be enabled before we
333                  * clear the bit.
334                  *
335                  * When soft_disable is not set but the SOFT_MODE flag is,
336                  * we do nothing. Do not disable the tracepoint, otherwise
337                  * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
338                  */
339                 if (soft_disable) {
340                         if (atomic_dec_return(&file->sm_ref) > 0)
341                                 break;
342                         disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
343                         clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
344                 } else
345                         disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
346
347                 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
348                         clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
349                         if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
350                                 tracing_stop_cmdline_record();
351                                 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
352                         }
353                         call->class->reg(call, TRACE_REG_UNREGISTER, file);
354                 }
355                 /* If in SOFT_MODE, just set the SOFT_DISABLED_BIT, else clear it */
356                 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
357                         set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
358                 else
359                         clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
360                 break;
361         case 1:
362                 /*
363                  * When soft_disable is set and enable is set, we want to
364                  * register the tracepoint for the event, but leave the event
365                  * as is. That means, if the event was already enabled, we do
366                  * nothing (but set SOFT_MODE). If the event is disabled, we
367                  * set SOFT_DISABLED before enabling the event tracepoint, so
368                  * it still seems to be disabled.
369                  */
370                 if (!soft_disable)
371                         clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
372                 else {
373                         if (atomic_inc_return(&file->sm_ref) > 1)
374                                 break;
375                         set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
376                 }
377
378                 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
379
380                         /* Keep the event disabled, when going to SOFT_MODE. */
381                         if (soft_disable)
382                                 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
383
384                         if (trace_flags & TRACE_ITER_RECORD_CMD) {
385                                 tracing_start_cmdline_record();
386                                 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
387                         }
388                         ret = call->class->reg(call, TRACE_REG_REGISTER, file);
389                         if (ret) {
390                                 tracing_stop_cmdline_record();
391                                 pr_info("event trace: Could not enable event "
392                                         "%s\n", ftrace_event_name(call));
393                                 break;
394                         }
395                         set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
396
397                         /* WAS_ENABLED gets set but never cleared. */
398                         call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
399                 }
400                 break;
401         }
402
403         return ret;
404 }
405
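/*
 * In short, the flags juggled above combine as follows: ENABLED means the
 * tracepoint probe is registered and the event is written out; SOFT_MODE
 * means some user (an event trigger, for instance) needs the probe
 * registered even while the event looks disabled; SOFT_DISABLED means the
 * probe may run but nothing is committed to the ring buffer. As an
 * illustrative example, the enable_event trigger arms its target event
 * through this soft enable path, so the target still reads as disabled
 * ("0*" in its "enable" file) until the trigger actually fires.
 */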
406 int trace_event_enable_disable(struct ftrace_event_file *file,
407                                int enable, int soft_disable)
408 {
409         return __ftrace_event_enable_disable(file, enable, soft_disable);
410 }
411
412 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
413                                        int enable)
414 {
415         return __ftrace_event_enable_disable(file, enable, 0);
416 }
417
418 static void ftrace_clear_events(struct trace_array *tr)
419 {
420         struct ftrace_event_file *file;
421
422         mutex_lock(&event_mutex);
423         list_for_each_entry(file, &tr->events, list) {
424                 ftrace_event_enable_disable(file, 0);
425         }
426         mutex_unlock(&event_mutex);
427 }
428
429 static void __put_system(struct event_subsystem *system)
430 {
431         struct event_filter *filter = system->filter;
432
433         WARN_ON_ONCE(system_refcount(system) == 0);
434         if (system_refcount_dec(system))
435                 return;
436
437         list_del(&system->list);
438
439         if (filter) {
440                 kfree(filter->filter_string);
441                 kfree(filter);
442         }
443         if (system->ref_count & SYSTEM_FL_FREE_NAME)
444                 kfree(system->name);
445         kfree(system);
446 }
447
448 static void __get_system(struct event_subsystem *system)
449 {
450         WARN_ON_ONCE(system_refcount(system) == 0);
451         system_refcount_inc(system);
452 }
453
454 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
455 {
456         WARN_ON_ONCE(dir->ref_count == 0);
457         dir->ref_count++;
458         __get_system(dir->subsystem);
459 }
460
461 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
462 {
463         WARN_ON_ONCE(dir->ref_count == 0);
464         /* If the subsystem is about to be freed, the dir must be too */
465         WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
466
467         __put_system(dir->subsystem);
468         if (!--dir->ref_count)
469                 kfree(dir);
470 }
471
472 static void put_system(struct ftrace_subsystem_dir *dir)
473 {
474         mutex_lock(&event_mutex);
475         __put_system_dir(dir);
476         mutex_unlock(&event_mutex);
477 }
478
479 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
480 {
481         if (!dir)
482                 return;
483
484         if (!--dir->nr_events) {
485                 tracefs_remove_recursive(dir->entry);
486                 list_del(&dir->list);
487                 __put_system_dir(dir);
488         }
489 }
490
491 static void remove_event_file_dir(struct ftrace_event_file *file)
492 {
493         struct dentry *dir = file->dir;
494         struct dentry *child;
495
496         if (dir) {
497                 spin_lock(&dir->d_lock);        /* probably unneeded */
498                 list_for_each_entry(child, &dir->d_subdirs, d_child) {
499                         if (d_really_is_positive(child))        /* probably unneeded */
500                                 d_inode(child)->i_private = NULL;
501                 }
502                 spin_unlock(&dir->d_lock);
503
504                 tracefs_remove_recursive(dir);
505         }
506
507         list_del(&file->list);
508         remove_subsystem(file->system);
509         free_event_filter(file->filter);
510         kmem_cache_free(file_cachep, file);
511 }
512
513 /*
514  * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events in tr.
515  */
516 static int
517 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
518                               const char *sub, const char *event, int set)
519 {
520         struct ftrace_event_file *file;
521         struct ftrace_event_call *call;
522         const char *name;
523         int ret = -EINVAL;
524
525         list_for_each_entry(file, &tr->events, list) {
526
527                 call = file->event_call;
528                 name = ftrace_event_name(call);
529
530                 if (!name || !call->class || !call->class->reg)
531                         continue;
532
533                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
534                         continue;
535
536                 if (match &&
537                     strcmp(match, name) != 0 &&
538                     strcmp(match, call->class->system) != 0)
539                         continue;
540
541                 if (sub && strcmp(sub, call->class->system) != 0)
542                         continue;
543
544                 if (event && strcmp(event, name) != 0)
545                         continue;
546
547                 ftrace_event_enable_disable(file, set);
548
549                 ret = 0;
550         }
551
552         return ret;
553 }
554
555 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
556                                   const char *sub, const char *event, int set)
557 {
558         int ret;
559
560         mutex_lock(&event_mutex);
561         ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
562         mutex_unlock(&event_mutex);
563
564         return ret;
565 }
566
567 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
568 {
569         char *event = NULL, *sub = NULL, *match;
570         int ret;
571
572         /*
573          * The buf format can be <subsystem>:<event-name>
574          *  *:<event-name> means any event by that name.
575          *  :<event-name> is the same.
576          *
577          *  <subsystem>:* means all events in that subsystem
578          *  <subsystem>: means the same.
579          *
580          *  <name> (no ':') means all events in a subsystem with
581          *  the name <name> or any event that matches <name>
582          */
583
584         match = strsep(&buf, ":");
585         if (buf) {
586                 sub = match;
587                 event = buf;
588                 match = NULL;
589
590                 if (!strlen(sub) || strcmp(sub, "*") == 0)
591                         sub = NULL;
592                 if (!strlen(event) || strcmp(event, "*") == 0)
593                         event = NULL;
594         }
595
596         ret = __ftrace_set_clr_event(tr, match, sub, event, set);
597
598         /* Put back the colon to allow this to be called again */
599         if (buf)
600                 *(buf - 1) = ':';
601
602         return ret;
603 }
604
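/*
 * Hedged examples of strings accepted here through the set_event file,
 * matching the format documented above (subsystem and event names are
 * illustrative):
 *
 *	echo sched:sched_switch > set_event	# a single event
 *	echo 'irq:*' > set_event		# every event in one subsystem
 *	echo sched > set_event			# subsystem, or any event, named "sched"
 *	echo '!sched:sched_switch' >> set_event	# '!' clears; see ftrace_event_write()
 */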
605 /**
606  * trace_set_clr_event - enable or disable an event
607  * @system: system name to match (NULL for any system)
608  * @event: event name to match (NULL for all events, within system)
609  * @set: 1 to enable, 0 to disable
610  *
611  * This is a way for other parts of the kernel to enable or disable
612  * event recording.
613  *
614  * Returns 0 on success, -EINVAL if the parameters do not match any
615  * registered events.
616  */
617 int trace_set_clr_event(const char *system, const char *event, int set)
618 {
619         struct trace_array *tr = top_trace_array();
620
621         if (!tr)
622                 return -ENODEV;
623
624         return __ftrace_set_clr_event(tr, NULL, system, event, set);
625 }
626 EXPORT_SYMBOL_GPL(trace_set_clr_event);
627
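/*
 * A minimal in-kernel usage sketch for trace_set_clr_event(); the
 * subsystem and event chosen here are only examples:
 *
 *	// enable every event in the "sched" subsystem of the top trace array
 *	ret = trace_set_clr_event("sched", NULL, 1);
 *
 *	// later, disable just one of them again
 *	ret = trace_set_clr_event("sched", "sched_switch", 0);
 */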
628 /* 128 should be much more than enough */
629 #define EVENT_BUF_SIZE          127
630
631 static ssize_t
632 ftrace_event_write(struct file *file, const char __user *ubuf,
633                    size_t cnt, loff_t *ppos)
634 {
635         struct trace_parser parser;
636         struct seq_file *m = file->private_data;
637         struct trace_array *tr = m->private;
638         ssize_t read, ret;
639
640         if (!cnt)
641                 return 0;
642
643         ret = tracing_update_buffers();
644         if (ret < 0)
645                 return ret;
646
647         if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
648                 return -ENOMEM;
649
650         read = trace_get_user(&parser, ubuf, cnt, ppos);
651
652         if (read >= 0 && trace_parser_loaded((&parser))) {
653                 int set = 1;
654
655                 if (*parser.buffer == '!')
656                         set = 0;
657
658                 parser.buffer[parser.idx] = 0;
659
660                 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
661                 if (ret)
662                         goto out_put;
663         }
664
665         ret = read;
666
667  out_put:
668         trace_parser_put(&parser);
669
670         return ret;
671 }
672
673 static void *
674 t_next(struct seq_file *m, void *v, loff_t *pos)
675 {
676         struct ftrace_event_file *file = v;
677         struct ftrace_event_call *call;
678         struct trace_array *tr = m->private;
679
680         (*pos)++;
681
682         list_for_each_entry_continue(file, &tr->events, list) {
683                 call = file->event_call;
684                 /*
685                  * The ftrace subsystem is for showing formats only.
686                  * Its events cannot be enabled or disabled via the event files.
687                  */
688                 if (call->class && call->class->reg)
689                         return file;
690         }
691
692         return NULL;
693 }
694
695 static void *t_start(struct seq_file *m, loff_t *pos)
696 {
697         struct ftrace_event_file *file;
698         struct trace_array *tr = m->private;
699         loff_t l;
700
701         mutex_lock(&event_mutex);
702
703         file = list_entry(&tr->events, struct ftrace_event_file, list);
704         for (l = 0; l <= *pos; ) {
705                 file = t_next(m, file, &l);
706                 if (!file)
707                         break;
708         }
709         return file;
710 }
711
712 static void *
713 s_next(struct seq_file *m, void *v, loff_t *pos)
714 {
715         struct ftrace_event_file *file = v;
716         struct trace_array *tr = m->private;
717
718         (*pos)++;
719
720         list_for_each_entry_continue(file, &tr->events, list) {
721                 if (file->flags & FTRACE_EVENT_FL_ENABLED)
722                         return file;
723         }
724
725         return NULL;
726 }
727
728 static void *s_start(struct seq_file *m, loff_t *pos)
729 {
730         struct ftrace_event_file *file;
731         struct trace_array *tr = m->private;
732         loff_t l;
733
734         mutex_lock(&event_mutex);
735
736         file = list_entry(&tr->events, struct ftrace_event_file, list);
737         for (l = 0; l <= *pos; ) {
738                 file = s_next(m, file, &l);
739                 if (!file)
740                         break;
741         }
742         return file;
743 }
744
745 static int t_show(struct seq_file *m, void *v)
746 {
747         struct ftrace_event_file *file = v;
748         struct ftrace_event_call *call = file->event_call;
749
750         if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
751                 seq_printf(m, "%s:", call->class->system);
752         seq_printf(m, "%s\n", ftrace_event_name(call));
753
754         return 0;
755 }
756
757 static void t_stop(struct seq_file *m, void *p)
758 {
759         mutex_unlock(&event_mutex);
760 }
761
762 static ssize_t
763 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
764                   loff_t *ppos)
765 {
766         struct ftrace_event_file *file;
767         unsigned long flags;
768         char buf[4] = "0";
769
770         mutex_lock(&event_mutex);
771         file = event_file_data(filp);
772         if (likely(file))
773                 flags = file->flags;
774         mutex_unlock(&event_mutex);
775
776         if (!file)
777                 return -ENODEV;
778
779         if (flags & FTRACE_EVENT_FL_ENABLED &&
780             !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
781                 strcpy(buf, "1");
782
783         if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
784             flags & FTRACE_EVENT_FL_SOFT_MODE)
785                 strcat(buf, "*");
786
787         strcat(buf, "\n");
788
789         return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
790 }
791
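/*
 * The buffer built above means an event's "enable" file reads as one of
 * "0", "1", "0*" or "1*", where the trailing '*' marks an event in
 * SOFT_MODE (and possibly SOFT_DISABLED), i.e. one held by a soft user
 * such as a trigger.
 */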
792 static ssize_t
793 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
794                    loff_t *ppos)
795 {
796         struct ftrace_event_file *file;
797         unsigned long val;
798         int ret;
799
800         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
801         if (ret)
802                 return ret;
803
804         ret = tracing_update_buffers();
805         if (ret < 0)
806                 return ret;
807
808         switch (val) {
809         case 0:
810         case 1:
811                 ret = -ENODEV;
812                 mutex_lock(&event_mutex);
813                 file = event_file_data(filp);
814                 if (likely(file))
815                         ret = ftrace_event_enable_disable(file, val);
816                 mutex_unlock(&event_mutex);
817                 break;
818
819         default:
820                 return -EINVAL;
821         }
822
823         *ppos += cnt;
824
825         return ret ? ret : cnt;
826 }
827
828 static ssize_t
829 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
830                    loff_t *ppos)
831 {
832         const char set_to_char[4] = { '?', '0', '1', 'X' };
833         struct ftrace_subsystem_dir *dir = filp->private_data;
834         struct event_subsystem *system = dir->subsystem;
835         struct ftrace_event_call *call;
836         struct ftrace_event_file *file;
837         struct trace_array *tr = dir->tr;
838         char buf[2];
839         int set = 0;
840         int ret;
841
842         mutex_lock(&event_mutex);
843         list_for_each_entry(file, &tr->events, list) {
844                 call = file->event_call;
845                 if (!ftrace_event_name(call) || !call->class || !call->class->reg)
846                         continue;
847
848                 if (system && strcmp(call->class->system, system->name) != 0)
849                         continue;
850
851                 /*
852                  * We need to find out if all the events are set
853                  * or if all events are cleared, or if we have
854                  * a mixture.
855                  */
856                 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
857
858                 /*
859                  * If we have a mixture, no need to look further.
860                  */
861                 if (set == 3)
862                         break;
863         }
864         mutex_unlock(&event_mutex);
865
866         buf[0] = set_to_char[set];
867         buf[1] = '\n';
868
869         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
870
871         return ret;
872 }
873
874 static ssize_t
875 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
876                     loff_t *ppos)
877 {
878         struct ftrace_subsystem_dir *dir = filp->private_data;
879         struct event_subsystem *system = dir->subsystem;
880         const char *name = NULL;
881         unsigned long val;
882         ssize_t ret;
883
884         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
885         if (ret)
886                 return ret;
887
888         ret = tracing_update_buffers();
889         if (ret < 0)
890                 return ret;
891
892         if (val != 0 && val != 1)
893                 return -EINVAL;
894
895         /*
896          * Opening of "enable" adds a ref count to system,
897          * so the name is safe to use.
898          */
899         if (system)
900                 name = system->name;
901
902         ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
903         if (ret)
904                 goto out;
905
906         ret = cnt;
907
908 out:
909         *ppos += cnt;
910
911         return ret;
912 }
913
914 enum {
915         FORMAT_HEADER           = 1,
916         FORMAT_FIELD_SEPERATOR  = 2,
917         FORMAT_PRINTFMT         = 3,
918 };
919
920 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
921 {
922         struct ftrace_event_call *call = event_file_data(m->private);
923         struct list_head *common_head = &ftrace_common_fields;
924         struct list_head *head = trace_get_fields(call);
925         struct list_head *node = v;
926
927         (*pos)++;
928
929         switch ((unsigned long)v) {
930         case FORMAT_HEADER:
931                 node = common_head;
932                 break;
933
934         case FORMAT_FIELD_SEPERATOR:
935                 node = head;
936                 break;
937
938         case FORMAT_PRINTFMT:
939                 /* all done */
940                 return NULL;
941         }
942
943         node = node->prev;
944         if (node == common_head)
945                 return (void *)FORMAT_FIELD_SEPERATOR;
946         else if (node == head)
947                 return (void *)FORMAT_PRINTFMT;
948         else
949                 return node;
950 }
951
952 static int f_show(struct seq_file *m, void *v)
953 {
954         struct ftrace_event_call *call = event_file_data(m->private);
955         struct ftrace_event_field *field;
956         const char *array_descriptor;
957
958         switch ((unsigned long)v) {
959         case FORMAT_HEADER:
960                 seq_printf(m, "name: %s\n", ftrace_event_name(call));
961                 seq_printf(m, "ID: %d\n", call->event.type);
962                 seq_puts(m, "format:\n");
963                 return 0;
964
965         case FORMAT_FIELD_SEPERATOR:
966                 seq_putc(m, '\n');
967                 return 0;
968
969         case FORMAT_PRINTFMT:
970                 seq_printf(m, "\nprint fmt: %s\n",
971                            call->print_fmt);
972                 return 0;
973         }
974
975         field = list_entry(v, struct ftrace_event_field, link);
976         /*
977          * Smartly shows the array type (except dynamic array).
978          * Normal:
979          *      field:TYPE VAR
980          * If TYPE := TYPE[LEN], it is shown:
981          *      field:TYPE VAR[LEN]
982          */
983         array_descriptor = strchr(field->type, '[');
984
985         if (!strncmp(field->type, "__data_loc", 10))
986                 array_descriptor = NULL;
987
988         if (!array_descriptor)
989                 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
990                            field->type, field->name, field->offset,
991                            field->size, !!field->is_signed);
992         else
993                 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
994                            (int)(array_descriptor - field->type),
995                            field->type, field->name,
996                            array_descriptor, field->offset,
997                            field->size, !!field->is_signed);
998
999         return 0;
1000 }
1001
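/*
 * Putting the f_show() cases together, a "format" file renders roughly as
 * sketched below (event name, ID, types and offsets are illustrative):
 *
 *	name: <event>
 *	ID: <id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		... remaining common_* fields ...
 *
 *		field:<type> <name>;	offset:<o>;	size:<s>;	signed:<0|1>;
 *		field:<type> <name>[<len>];	offset:<o>;	size:<s>;	signed:<0|1>;
 *
 *	print fmt: <call->print_fmt>
 */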
1002 static void *f_start(struct seq_file *m, loff_t *pos)
1003 {
1004         void *p = (void *)FORMAT_HEADER;
1005         loff_t l = 0;
1006
1007         /* ->stop() is called even if ->start() fails */
1008         mutex_lock(&event_mutex);
1009         if (!event_file_data(m->private))
1010                 return ERR_PTR(-ENODEV);
1011
1012         while (l < *pos && p)
1013                 p = f_next(m, p, &l);
1014
1015         return p;
1016 }
1017
1018 static void f_stop(struct seq_file *m, void *p)
1019 {
1020         mutex_unlock(&event_mutex);
1021 }
1022
1023 static const struct seq_operations trace_format_seq_ops = {
1024         .start          = f_start,
1025         .next           = f_next,
1026         .stop           = f_stop,
1027         .show           = f_show,
1028 };
1029
1030 static int trace_format_open(struct inode *inode, struct file *file)
1031 {
1032         struct seq_file *m;
1033         int ret;
1034
1035         ret = seq_open(file, &trace_format_seq_ops);
1036         if (ret < 0)
1037                 return ret;
1038
1039         m = file->private_data;
1040         m->private = file;
1041
1042         return 0;
1043 }
1044
1045 static ssize_t
1046 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1047 {
1048         int id = (long)event_file_data(filp);
1049         char buf[32];
1050         int len;
1051
1052         if (*ppos)
1053                 return 0;
1054
1055         if (unlikely(!id))
1056                 return -ENODEV;
1057
1058         len = sprintf(buf, "%d\n", id);
1059
1060         return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1061 }
1062
1063 static ssize_t
1064 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1065                   loff_t *ppos)
1066 {
1067         struct ftrace_event_file *file;
1068         struct trace_seq *s;
1069         int r = -ENODEV;
1070
1071         if (*ppos)
1072                 return 0;
1073
1074         s = kmalloc(sizeof(*s), GFP_KERNEL);
1075
1076         if (!s)
1077                 return -ENOMEM;
1078
1079         trace_seq_init(s);
1080
1081         mutex_lock(&event_mutex);
1082         file = event_file_data(filp);
1083         if (file)
1084                 print_event_filter(file, s);
1085         mutex_unlock(&event_mutex);
1086
1087         if (file)
1088                 r = simple_read_from_buffer(ubuf, cnt, ppos,
1089                                             s->buffer, trace_seq_used(s));
1090
1091         kfree(s);
1092
1093         return r;
1094 }
1095
1096 static ssize_t
1097 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1098                    loff_t *ppos)
1099 {
1100         struct ftrace_event_file *file;
1101         char *buf;
1102         int err = -ENODEV;
1103
1104         if (cnt >= PAGE_SIZE)
1105                 return -EINVAL;
1106
1107         buf = (char *)__get_free_page(GFP_TEMPORARY);
1108         if (!buf)
1109                 return -ENOMEM;
1110
1111         if (copy_from_user(buf, ubuf, cnt)) {
1112                 free_page((unsigned long) buf);
1113                 return -EFAULT;
1114         }
1115         buf[cnt] = '\0';
1116
1117         mutex_lock(&event_mutex);
1118         file = event_file_data(filp);
1119         if (file)
1120                 err = apply_event_filter(file, buf);
1121         mutex_unlock(&event_mutex);
1122
1123         free_page((unsigned long) buf);
1124         if (err < 0)
1125                 return err;
1126
1127         *ppos += cnt;
1128
1129         return cnt;
1130 }
1131
1132 static LIST_HEAD(event_subsystems);
1133
1134 static int subsystem_open(struct inode *inode, struct file *filp)
1135 {
1136         struct event_subsystem *system = NULL;
1137         struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1138         struct trace_array *tr;
1139         int ret;
1140
1141         if (tracing_is_disabled())
1142                 return -ENODEV;
1143
1144         /* Make sure the system still exists */
1145         mutex_lock(&trace_types_lock);
1146         mutex_lock(&event_mutex);
1147         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1148                 list_for_each_entry(dir, &tr->systems, list) {
1149                         if (dir == inode->i_private) {
1150                                 /* Don't open systems with no events */
1151                                 if (dir->nr_events) {
1152                                         __get_system_dir(dir);
1153                                         system = dir->subsystem;
1154                                 }
1155                                 goto exit_loop;
1156                         }
1157                 }
1158         }
1159  exit_loop:
1160         mutex_unlock(&event_mutex);
1161         mutex_unlock(&trace_types_lock);
1162
1163         if (!system)
1164                 return -ENODEV;
1165
1166         /* Some versions of gcc think dir can be uninitialized here */
1167         WARN_ON(!dir);
1168
1169         /* Still need to increment the ref count of the system */
1170         if (trace_array_get(tr) < 0) {
1171                 put_system(dir);
1172                 return -ENODEV;
1173         }
1174
1175         ret = tracing_open_generic(inode, filp);
1176         if (ret < 0) {
1177                 trace_array_put(tr);
1178                 put_system(dir);
1179         }
1180
1181         return ret;
1182 }
1183
1184 static int system_tr_open(struct inode *inode, struct file *filp)
1185 {
1186         struct ftrace_subsystem_dir *dir;
1187         struct trace_array *tr = inode->i_private;
1188         int ret;
1189
1190         if (tracing_is_disabled())
1191                 return -ENODEV;
1192
1193         if (trace_array_get(tr) < 0)
1194                 return -ENODEV;
1195
1196         /* Make a temporary dir that has no system but points to tr */
1197         dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1198         if (!dir) {
1199                 trace_array_put(tr);
1200                 return -ENOMEM;
1201         }
1202
1203         dir->tr = tr;
1204
1205         ret = tracing_open_generic(inode, filp);
1206         if (ret < 0) {
1207                 trace_array_put(tr);
1208                 kfree(dir);
1209                 return ret;
1210         }
1211
1212         filp->private_data = dir;
1213
1214         return 0;
1215 }
1216
1217 static int subsystem_release(struct inode *inode, struct file *file)
1218 {
1219         struct ftrace_subsystem_dir *dir = file->private_data;
1220
1221         trace_array_put(dir->tr);
1222
1223         /*
1224          * If dir->subsystem is NULL, then this is a temporary
1225          * descriptor that was made for a trace_array to enable
1226          * all subsystems.
1227          */
1228         if (dir->subsystem)
1229                 put_system(dir);
1230         else
1231                 kfree(dir);
1232
1233         return 0;
1234 }
1235
1236 static ssize_t
1237 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1238                       loff_t *ppos)
1239 {
1240         struct ftrace_subsystem_dir *dir = filp->private_data;
1241         struct event_subsystem *system = dir->subsystem;
1242         struct trace_seq *s;
1243         int r;
1244
1245         if (*ppos)
1246                 return 0;
1247
1248         s = kmalloc(sizeof(*s), GFP_KERNEL);
1249         if (!s)
1250                 return -ENOMEM;
1251
1252         trace_seq_init(s);
1253
1254         print_subsystem_event_filter(system, s);
1255         r = simple_read_from_buffer(ubuf, cnt, ppos,
1256                                     s->buffer, trace_seq_used(s));
1257
1258         kfree(s);
1259
1260         return r;
1261 }
1262
1263 static ssize_t
1264 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1265                        loff_t *ppos)
1266 {
1267         struct ftrace_subsystem_dir *dir = filp->private_data;
1268         char *buf;
1269         int err;
1270
1271         if (cnt >= PAGE_SIZE)
1272                 return -EINVAL;
1273
1274         buf = (char *)__get_free_page(GFP_TEMPORARY);
1275         if (!buf)
1276                 return -ENOMEM;
1277
1278         if (copy_from_user(buf, ubuf, cnt)) {
1279                 free_page((unsigned long) buf);
1280                 return -EFAULT;
1281         }
1282         buf[cnt] = '\0';
1283
1284         err = apply_subsystem_event_filter(dir, buf);
1285         free_page((unsigned long) buf);
1286         if (err < 0)
1287                 return err;
1288
1289         *ppos += cnt;
1290
1291         return cnt;
1292 }
1293
1294 static ssize_t
1295 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1296 {
1297         int (*func)(struct trace_seq *s) = filp->private_data;
1298         struct trace_seq *s;
1299         int r;
1300
1301         if (*ppos)
1302                 return 0;
1303
1304         s = kmalloc(sizeof(*s), GFP_KERNEL);
1305         if (!s)
1306                 return -ENOMEM;
1307
1308         trace_seq_init(s);
1309
1310         func(s);
1311         r = simple_read_from_buffer(ubuf, cnt, ppos,
1312                                     s->buffer, trace_seq_used(s));
1313
1314         kfree(s);
1315
1316         return r;
1317 }
1318
1319 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1320 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1321 static int ftrace_event_release(struct inode *inode, struct file *file);
1322
1323 static const struct seq_operations show_event_seq_ops = {
1324         .start = t_start,
1325         .next = t_next,
1326         .show = t_show,
1327         .stop = t_stop,
1328 };
1329
1330 static const struct seq_operations show_set_event_seq_ops = {
1331         .start = s_start,
1332         .next = s_next,
1333         .show = t_show,
1334         .stop = t_stop,
1335 };
1336
1337 static const struct file_operations ftrace_avail_fops = {
1338         .open = ftrace_event_avail_open,
1339         .read = seq_read,
1340         .llseek = seq_lseek,
1341         .release = seq_release,
1342 };
1343
1344 static const struct file_operations ftrace_set_event_fops = {
1345         .open = ftrace_event_set_open,
1346         .read = seq_read,
1347         .write = ftrace_event_write,
1348         .llseek = seq_lseek,
1349         .release = ftrace_event_release,
1350 };
1351
1352 static const struct file_operations ftrace_enable_fops = {
1353         .open = tracing_open_generic,
1354         .read = event_enable_read,
1355         .write = event_enable_write,
1356         .llseek = default_llseek,
1357 };
1358
1359 static const struct file_operations ftrace_event_format_fops = {
1360         .open = trace_format_open,
1361         .read = seq_read,
1362         .llseek = seq_lseek,
1363         .release = seq_release,
1364 };
1365
1366 static const struct file_operations ftrace_event_id_fops = {
1367         .read = event_id_read,
1368         .llseek = default_llseek,
1369 };
1370
1371 static const struct file_operations ftrace_event_filter_fops = {
1372         .open = tracing_open_generic,
1373         .read = event_filter_read,
1374         .write = event_filter_write,
1375         .llseek = default_llseek,
1376 };
1377
1378 static const struct file_operations ftrace_subsystem_filter_fops = {
1379         .open = subsystem_open,
1380         .read = subsystem_filter_read,
1381         .write = subsystem_filter_write,
1382         .llseek = default_llseek,
1383         .release = subsystem_release,
1384 };
1385
1386 static const struct file_operations ftrace_system_enable_fops = {
1387         .open = subsystem_open,
1388         .read = system_enable_read,
1389         .write = system_enable_write,
1390         .llseek = default_llseek,
1391         .release = subsystem_release,
1392 };
1393
1394 static const struct file_operations ftrace_tr_enable_fops = {
1395         .open = system_tr_open,
1396         .read = system_enable_read,
1397         .write = system_enable_write,
1398         .llseek = default_llseek,
1399         .release = subsystem_release,
1400 };
1401
1402 static const struct file_operations ftrace_show_header_fops = {
1403         .open = tracing_open_generic,
1404         .read = show_header,
1405         .llseek = default_llseek,
1406 };
1407
1408 static int
1409 ftrace_event_open(struct inode *inode, struct file *file,
1410                   const struct seq_operations *seq_ops)
1411 {
1412         struct seq_file *m;
1413         int ret;
1414
1415         ret = seq_open(file, seq_ops);
1416         if (ret < 0)
1417                 return ret;
1418         m = file->private_data;
1419         /* copy tr over to seq ops */
1420         m->private = inode->i_private;
1421
1422         return ret;
1423 }
1424
1425 static int ftrace_event_release(struct inode *inode, struct file *file)
1426 {
1427         struct trace_array *tr = inode->i_private;
1428
1429         trace_array_put(tr);
1430
1431         return seq_release(inode, file);
1432 }
1433
1434 static int
1435 ftrace_event_avail_open(struct inode *inode, struct file *file)
1436 {
1437         const struct seq_operations *seq_ops = &show_event_seq_ops;
1438
1439         return ftrace_event_open(inode, file, seq_ops);
1440 }
1441
1442 static int
1443 ftrace_event_set_open(struct inode *inode, struct file *file)
1444 {
1445         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1446         struct trace_array *tr = inode->i_private;
1447         int ret;
1448
1449         if (trace_array_get(tr) < 0)
1450                 return -ENODEV;
1451
1452         if ((file->f_mode & FMODE_WRITE) &&
1453             (file->f_flags & O_TRUNC))
1454                 ftrace_clear_events(tr);
1455
1456         ret = ftrace_event_open(inode, file, seq_ops);
1457         if (ret < 0)
1458                 trace_array_put(tr);
1459         return ret;
1460 }
1461
1462 static struct event_subsystem *
1463 create_new_subsystem(const char *name)
1464 {
1465         struct event_subsystem *system;
1466
1467         /* need to create new entry */
1468         system = kmalloc(sizeof(*system), GFP_KERNEL);
1469         if (!system)
1470                 return NULL;
1471
1472         system->ref_count = 1;
1473
1474         /* Only allocate if dynamic (kprobes and modules) */
1475         if (!core_kernel_data((unsigned long)name)) {
1476                 system->ref_count |= SYSTEM_FL_FREE_NAME;
1477                 system->name = kstrdup(name, GFP_KERNEL);
1478                 if (!system->name)
1479                         goto out_free;
1480         } else
1481                 system->name = name;
1482
1483         system->filter = NULL;
1484
1485         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1486         if (!system->filter)
1487                 goto out_free;
1488
1489         list_add(&system->list, &event_subsystems);
1490
1491         return system;
1492
1493  out_free:
1494         if (system->ref_count & SYSTEM_FL_FREE_NAME)
1495                 kfree(system->name);
1496         kfree(system);
1497         return NULL;
1498 }
1499
1500 static struct dentry *
1501 event_subsystem_dir(struct trace_array *tr, const char *name,
1502                     struct ftrace_event_file *file, struct dentry *parent)
1503 {
1504         struct ftrace_subsystem_dir *dir;
1505         struct event_subsystem *system;
1506         struct dentry *entry;
1507
1508         /* First see if we have already created this dir */
1509         list_for_each_entry(dir, &tr->systems, list) {
1510                 system = dir->subsystem;
1511                 if (strcmp(system->name, name) == 0) {
1512                         dir->nr_events++;
1513                         file->system = dir;
1514                         return dir->entry;
1515                 }
1516         }
1517
1518         /* Now see if the system itself exists. */
1519         list_for_each_entry(system, &event_subsystems, list) {
1520                 if (strcmp(system->name, name) == 0)
1521                         break;
1522         }
1523         /* Reset system variable when not found */
1524         if (&system->list == &event_subsystems)
1525                 system = NULL;
1526
1527         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1528         if (!dir)
1529                 goto out_fail;
1530
1531         if (!system) {
1532                 system = create_new_subsystem(name);
1533                 if (!system)
1534                         goto out_free;
1535         } else
1536                 __get_system(system);
1537
1538         dir->entry = tracefs_create_dir(name, parent);
1539         if (!dir->entry) {
1540                 pr_warn("Failed to create system directory %s\n", name);
1541                 __put_system(system);
1542                 goto out_free;
1543         }
1544
1545         dir->tr = tr;
1546         dir->ref_count = 1;
1547         dir->nr_events = 1;
1548         dir->subsystem = system;
1549         file->system = dir;
1550
1551         entry = tracefs_create_file("filter", 0644, dir->entry, dir,
1552                                     &ftrace_subsystem_filter_fops);
1553         if (!entry) {
1554                 kfree(system->filter);
1555                 system->filter = NULL;
1556                 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
1557         }
1558
1559         trace_create_file("enable", 0644, dir->entry, dir,
1560                           &ftrace_system_enable_fops);
1561
1562         list_add(&dir->list, &tr->systems);
1563
1564         return dir->entry;
1565
1566  out_free:
1567         kfree(dir);
1568  out_fail:
1569         /* Only print this message if failed on memory allocation */
1570         if (!dir || !system)
1571                 pr_warn("No memory to create event subsystem %s\n", name);
1572         return NULL;
1573 }
1574
1575 static int
1576 event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1577 {
1578         struct ftrace_event_call *call = file->event_call;
1579         struct trace_array *tr = file->tr;
1580         struct list_head *head;
1581         struct dentry *d_events;
1582         const char *name;
1583         int ret;
1584
1585         /*
1586          * If the trace point header did not define TRACE_SYSTEM
1587          * then the system would be called "TRACE_SYSTEM".
1588          */
1589         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1590                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1591                 if (!d_events)
1592                         return -ENOMEM;
1593         } else
1594                 d_events = parent;
1595
1596         name = ftrace_event_name(call);
1597         file->dir = tracefs_create_dir(name, d_events);
1598         if (!file->dir) {
1599                 pr_warn("Could not create tracefs '%s' directory\n", name);
1600                 return -1;
1601         }
1602
1603         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1604                 trace_create_file("enable", 0644, file->dir, file,
1605                                   &ftrace_enable_fops);
1606
1607 #ifdef CONFIG_PERF_EVENTS
1608         if (call->event.type && call->class->reg)
1609                 trace_create_file("id", 0444, file->dir,
1610                                   (void *)(long)call->event.type,
1611                                   &ftrace_event_id_fops);
1612 #endif
1613
1614         /*
1615          * Other events may have the same class. Only update
1616          * the fields if they are not already defined.
1617          */
1618         head = trace_get_fields(call);
1619         if (list_empty(head)) {
1620                 ret = call->class->define_fields(call);
1621                 if (ret < 0) {
1622                         pr_warn("Could not initialize trace point events/%s\n",
1623                                 name);
1624                         return -1;
1625                 }
1626         }
1627         trace_create_file("filter", 0644, file->dir, file,
1628                           &ftrace_event_filter_fops);
1629
1630         trace_create_file("trigger", 0644, file->dir, file,
1631                           &event_trigger_fops);
1632
1633         trace_create_file("format", 0444, file->dir, call,
1634                           &ftrace_event_format_fops);
1635
1636         return 0;
1637 }
1638
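/*
 * The tracefs layout this ends up creating for each event (the "id" file
 * only with CONFIG_PERF_EVENTS, "enable" only for events whose class has
 * a reg method and is not marked IGNORE_ENABLE):
 *
 *	events/<system>/enable
 *	events/<system>/filter
 *	events/<system>/<event>/enable
 *	events/<system>/<event>/id
 *	events/<system>/<event>/filter
 *	events/<system>/<event>/trigger
 *	events/<system>/<event>/format
 */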
1639 static void remove_event_from_tracers(struct ftrace_event_call *call)
1640 {
1641         struct ftrace_event_file *file;
1642         struct trace_array *tr;
1643
1644         do_for_each_event_file_safe(tr, file) {
1645                 if (file->event_call != call)
1646                         continue;
1647
1648                 remove_event_file_dir(file);
1649                 /*
1650                  * The do_for_each_event_file_safe() is
1651                  * a double loop. After finding the call for this
1652                  * trace_array, we use break to jump to the next
1653                  * trace_array.
1654                  */
1655                 break;
1656         } while_for_each_event_file();
1657 }
1658
1659 static void event_remove(struct ftrace_event_call *call)
1660 {
1661         struct trace_array *tr;
1662         struct ftrace_event_file *file;
1663
1664         do_for_each_event_file(tr, file) {
1665                 if (file->event_call != call)
1666                         continue;
1667                 ftrace_event_enable_disable(file, 0);
1668                 /*
1669                  * The do_for_each_event_file() is
1670                  * a double loop. After finding the call for this
1671                  * trace_array, we use break to jump to the next
1672                  * trace_array.
1673                  */
1674                 break;
1675         } while_for_each_event_file();
1676
1677         if (call->event.funcs)
1678                 __unregister_ftrace_event(&call->event);
1679         remove_event_from_tracers(call);
1680         list_del(&call->list);
1681 }
1682
1683 static int event_init(struct ftrace_event_call *call)
1684 {
1685         int ret = 0;
1686         const char *name;
1687
1688         name = ftrace_event_name(call);
1689         if (WARN_ON(!name))
1690                 return -EINVAL;
1691
1692         if (call->class->raw_init) {
1693                 ret = call->class->raw_init(call);
1694                 if (ret < 0 && ret != -ENOSYS)
1695                         pr_warn("Could not initialize trace events/%s\n", name);
1696         }
1697
1698         return ret;
1699 }
1700
1701 static int
1702 __register_event(struct ftrace_event_call *call, struct module *mod)
1703 {
1704         int ret;
1705
1706         ret = event_init(call);
1707         if (ret < 0)
1708                 return ret;
1709
1710         list_add(&call->list, &ftrace_events);
1711         call->mod = mod;
1712
1713         return 0;
1714 }
1715
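/*
 * Replace, in place, the enum name at @ptr in a print_fmt string with its
 * numeric value from @map. @len is the length of the enum name. Returns a
 * pointer just past the inserted value, or NULL if the value string would
 * be longer than the name it replaces.
 */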
1716 static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
1717 {
1718         int rlen;
1719         int elen;
1720
1721         /* Find the length of the enum value as a string */
1722         elen = snprintf(ptr, 0, "%ld", map->enum_value);
1723         /* Make sure there's enough room to replace the string with the value */
1724         if (len < elen)
1725                 return NULL;
1726
1727         snprintf(ptr, elen + 1, "%ld", map->enum_value);
1728
1729         /* Get the length of the rest of the string after the enum name */
1730         rlen = strlen(ptr + len);
1731         memmove(ptr + elen, ptr + len, rlen);
1732         /* Make sure we end the new string */
1733         ptr[elen + rlen] = 0;
1734
1735         return ptr + elen;
1736 }
1737
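/*
 * Scan the event's print_fmt and replace any use of the enum name in
 * @map with its numeric value. Quoted strings and numeric literals are
 * skipped, so only bare identifiers outside of format strings are touched.
 */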
1738 static void update_event_printk(struct ftrace_event_call *call,
1739                                 struct trace_enum_map *map)
1740 {
1741         char *ptr;
1742         int quote = 0;
1743         int len = strlen(map->enum_string);
1744
1745         for (ptr = call->print_fmt; *ptr; ptr++) {
1746                 if (*ptr == '\\') {
1747                         ptr++;
1748                         /* paranoid */
1749                         if (!*ptr)
1750                                 break;
1751                         continue;
1752                 }
1753                 if (*ptr == '"') {
1754                         quote ^= 1;
1755                         continue;
1756                 }
1757                 if (quote)
1758                         continue;
1759                 if (isdigit(*ptr)) {
1760                         /* skip numbers */
1761                         do {
1762                                 ptr++;
1763                                 /* Check for alpha chars like ULL */
1764                         } while (isalnum(*ptr));
1765                         if (!*ptr)
1766                                 break;
1767                         /*
1768                          * A number must have some kind of delimiter after
1769                          * it, and we can ignore that too.
1770                          */
1771                         continue;
1772                 }
1773                 if (isalpha(*ptr) || *ptr == '_') {
1774                         if (strncmp(map->enum_string, ptr, len) == 0 &&
1775                             !isalnum(ptr[len]) && ptr[len] != '_') {
1776                                 ptr = enum_replace(ptr, map, len);
1777                                 /* Hmm, enum string smaller than value */
1778                                 if (WARN_ON_ONCE(!ptr))
1779                                         return;
1780                                 /*
1781                                  * No need to decrement here, as enum_replace()
1782                                  * returns the pointer to the character just past
1783                                  * the enum, and two enums cannot be placed
1784                                  * back to back without something in between.
1785                                  * We can skip that something in between.
1786                                  */
1787                                 continue;
1788                         }
1789                 skip_more:
1790                         do {
1791                                 ptr++;
1792                         } while (isalnum(*ptr) || *ptr == '_');
1793                         if (!*ptr)
1794                                 break;
1795                         /*
1796                          * If what comes after this variable is a '.' or
1797                          * '->' then we can continue to ignore that string.
1798                          */
1799                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
1800                                 ptr += *ptr == '.' ? 1 : 2;
1801                                 if (!*ptr)
1802                                         break;
1803                                 goto skip_more;
1804                         }
1805                         /*
1806                          * Once again, we can skip the delimiter that came
1807                          * after the string.
1808                          */
1809                         continue;
1810                 }
1811         }
1812 }
1813
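/*
 * Given an array of @len enum mappings, walk all registered events and
 * rewrite their print_fmt strings so that enum names appear as their
 * numeric values. Maps are matched against events from the same system
 * to limit the number of comparisons.
 */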
1814 void trace_event_enum_update(struct trace_enum_map **map, int len)
1815 {
1816         struct ftrace_event_call *call, *p;
1817         const char *last_system = NULL;
1818         int last_i;
1819         int i;
1820
1821         down_write(&trace_event_sem);
1822         list_for_each_entry_safe(call, p, &ftrace_events, list) {
1823                 /* events are usually grouped together with systems */
1824                 if (!last_system || call->class->system != last_system) {
1825                         last_i = 0;
1826                         last_system = call->class->system;
1827                 }
1828
1829                 for (i = last_i; i < len; i++) {
1830                         if (call->class->system == map[i]->system) {
1831                                 /* Save the first system if need be */
1832                                 if (!last_i)
1833                                         last_i = i;
1834                                 update_event_printk(call, map[i]);
1835                         }
1836                 }
1837         }
1838         up_write(&trace_event_sem);
1839 }
1840
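/*
 * Allocate an ftrace_event_file for @call, link it to the trace array
 * @tr and add it to the array's list of events. The tracefs directory
 * for the event is created separately by event_create_dir().
 */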
1841 static struct ftrace_event_file *
1842 trace_create_new_event(struct ftrace_event_call *call,
1843                        struct trace_array *tr)
1844 {
1845         struct ftrace_event_file *file;
1846
1847         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1848         if (!file)
1849                 return NULL;
1850
1851         file->event_call = call;
1852         file->tr = tr;
1853         atomic_set(&file->sm_ref, 0);
1854         atomic_set(&file->tm_ref, 0);
1855         INIT_LIST_HEAD(&file->triggers);
1856         list_add(&file->list, &tr->events);
1857
1858         return file;
1859 }
1860
1861 /* Add an event to a trace directory */
1862 static int
1863 __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
1864 {
1865         struct ftrace_event_file *file;
1866
1867         file = trace_create_new_event(call, tr);
1868         if (!file)
1869                 return -ENOMEM;
1870
1871         return event_create_dir(tr->event_dir, file);
1872 }
1873
1874 /*
1875  * Just create a descriptor for early init. A descriptor is required
1876  * for enabling events at boot. We want to enable events before
1877  * the filesystem is initialized.
1878  */
1879 static __init int
1880 __trace_early_add_new_event(struct ftrace_event_call *call,
1881                             struct trace_array *tr)
1882 {
1883         struct ftrace_event_file *file;
1884
1885         file = trace_create_new_event(call, tr);
1886         if (!file)
1887                 return -ENOMEM;
1888
1889         return 0;
1890 }
1891
1892 struct ftrace_module_file_ops;
1893 static void __add_event_to_tracers(struct ftrace_event_call *call);
1894
1895 /* Add an additional event_call dynamically */
1896 int trace_add_event_call(struct ftrace_event_call *call)
1897 {
1898         int ret;
1899         mutex_lock(&trace_types_lock);
1900         mutex_lock(&event_mutex);
1901
1902         ret = __register_event(call, NULL);
1903         if (ret >= 0)
1904                 __add_event_to_tracers(call);
1905
1906         mutex_unlock(&event_mutex);
1907         mutex_unlock(&trace_types_lock);
1908         return ret;
1909 }
1910
1911 /*
1912  * Must be called with trace_types_lock, event_mutex and
1913  * trace_event_sem held.
1914  */
1915 static void __trace_remove_event_call(struct ftrace_event_call *call)
1916 {
1917         event_remove(call);
1918         trace_destroy_fields(call);
1919         free_event_filter(call->filter);
1920         call->filter = NULL;
1921 }
1922
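/*
 * Remove @call unless it is still busy: a perf reference or an enabled
 * instance in any trace array makes the removal fail with -EBUSY.
 */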
1923 static int probe_remove_event_call(struct ftrace_event_call *call)
1924 {
1925         struct trace_array *tr;
1926         struct ftrace_event_file *file;
1927
1928 #ifdef CONFIG_PERF_EVENTS
1929         if (call->perf_refcount)
1930                 return -EBUSY;
1931 #endif
1932         do_for_each_event_file(tr, file) {
1933                 if (file->event_call != call)
1934                         continue;
1935                 /*
1936                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
1937                  * we are about to do; FTRACE_EVENT_FL_SOFT_MODE can suppress
1938                  * TRACE_REG_UNREGISTER.
1939                  */
1940                 if (file->flags & FTRACE_EVENT_FL_ENABLED)
1941                         return -EBUSY;
1942                 /*
1943                  * The do_for_each_event_file() is
1944                  * a double loop. After finding the call for this
1945                  * trace_array, we use break to jump to the next
1946                  * trace_array.
1947                  */
1948                 break;
1949         } while_for_each_event_file();
1950
1951         __trace_remove_event_call(call);
1952
1953         return 0;
1954 }
1955
1956 /* Remove an event_call */
1957 int trace_remove_event_call(struct ftrace_event_call *call)
1958 {
1959         int ret;
1960
1961         mutex_lock(&trace_types_lock);
1962         mutex_lock(&event_mutex);
1963         down_write(&trace_event_sem);
1964         ret = probe_remove_event_call(call);
1965         up_write(&trace_event_sem);
1966         mutex_unlock(&event_mutex);
1967         mutex_unlock(&trace_types_lock);
1968
1969         return ret;
1970 }
1971
1972 #define for_each_event(event, start, end)                       \
1973         for (event = start;                                     \
1974              (unsigned long)event < (unsigned long)end;         \
1975              event++)
1976
1977 #ifdef CONFIG_MODULES
1978
1979 static void trace_module_add_events(struct module *mod)
1980 {
1981         struct ftrace_event_call **call, **start, **end;
1982
1983         if (!mod->num_trace_events)
1984                 return;
1985
1986         /* Don't add infrastructure for mods without tracepoints */
1987         if (trace_module_has_bad_taint(mod)) {
1988                 pr_err("%s: module has bad taint, not creating trace events\n",
1989                        mod->name);
1990                 return;
1991         }
1992
1993         start = mod->trace_events;
1994         end = mod->trace_events + mod->num_trace_events;
1995
1996         for_each_event(call, start, end) {
1997                 __register_event(*call, mod);
1998                 __add_event_to_tracers(*call);
1999         }
2000 }
2001
2002 static void trace_module_remove_events(struct module *mod)
2003 {
2004         struct ftrace_event_call *call, *p;
2005         bool clear_trace = false;
2006
2007         down_write(&trace_event_sem);
2008         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2009                 if (call->mod == mod) {
2010                         if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2011                                 clear_trace = true;
2012                         __trace_remove_event_call(call);
2013                 }
2014         }
2015         up_write(&trace_event_sem);
2016
2017         /*
2018          * It is safest to reset the ring buffer if the module being unloaded
2019          * registered any events that were used. The only worry is if
2020          * a new module gets loaded, and takes on the same id as the events
2021          * of this module. When printing out the buffer, traced events left
2022          * over from this module may be passed to the new module events and
2023          * unexpected results may occur.
2024          */
2025         if (clear_trace)
2026                 tracing_reset_all_online_cpus();
2027 }
2028
2029 static int trace_module_notify(struct notifier_block *self,
2030                                unsigned long val, void *data)
2031 {
2032         struct module *mod = data;
2033
2034         mutex_lock(&trace_types_lock);
2035         mutex_lock(&event_mutex);
2036         switch (val) {
2037         case MODULE_STATE_COMING:
2038                 trace_module_add_events(mod);
2039                 break;
2040         case MODULE_STATE_GOING:
2041                 trace_module_remove_events(mod);
2042                 break;
2043         }
2044         mutex_unlock(&event_mutex);
2045         mutex_unlock(&trace_types_lock);
2046
2047         return 0;
2048 }
2049
2050 static struct notifier_block trace_module_nb = {
2051         .notifier_call = trace_module_notify,
2052         .priority = 1, /* higher than trace.c module notify */
2053 };
2054 #endif /* CONFIG_MODULES */
2055
2056 /* Create a new event directory structure for a trace directory. */
2057 static void
2058 __trace_add_event_dirs(struct trace_array *tr)
2059 {
2060         struct ftrace_event_call *call;
2061         int ret;
2062
2063         list_for_each_entry(call, &ftrace_events, list) {
2064                 ret = __trace_add_new_event(call, tr);
2065                 if (ret < 0)
2066                         pr_warn("Could not create directory for event %s\n",
2067                                 ftrace_event_name(call));
2068         }
2069 }
2070
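/*
 * Look up the ftrace_event_file for @system:@event in the trace array
 * @tr. Events without a registration function or marked with
 * TRACE_EVENT_FL_IGNORE_ENABLE are skipped.
 */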
2071 struct ftrace_event_file *
2072 find_event_file(struct trace_array *tr, const char *system,  const char *event)
2073 {
2074         struct ftrace_event_file *file;
2075         struct ftrace_event_call *call;
2076         const char *name;
2077
2078         list_for_each_entry(file, &tr->events, list) {
2079
2080                 call = file->event_call;
2081                 name = ftrace_event_name(call);
2082
2083                 if (!name || !call->class || !call->class->reg)
2084                         continue;
2085
2086                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2087                         continue;
2088
2089                 if (strcmp(event, name) == 0 &&
2090                     strcmp(system, call->class->system) == 0)
2091                         return file;
2092         }
2093         return NULL;
2094 }
2095
2096 #ifdef CONFIG_DYNAMIC_FTRACE
2097
2098 /* Avoid typos */
2099 #define ENABLE_EVENT_STR        "enable_event"
2100 #define DISABLE_EVENT_STR       "disable_event"
2101
2102 struct event_probe_data {
2103         struct ftrace_event_file        *file;
2104         unsigned long                   count;
2105         int                             ref;
2106         bool                            enable;
2107 };
2108
2109 static void
2110 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2111 {
2112         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2113         struct event_probe_data *data = *pdata;
2114
2115         if (!data)
2116                 return;
2117
2118         if (data->enable)
2119                 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2120         else
2121                 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2122 }
2123
2124 static void
2125 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2126 {
2127         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2128         struct event_probe_data *data = *pdata;
2129
2130         if (!data)
2131                 return;
2132
2133         if (!data->count)
2134                 return;
2135
2136         /* Skip if the event is already in the state we want to switch to */
2137         if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
2138                 return;
2139
2140         if (data->count != -1)
2141                 (data->count)--;
2142
2143         event_enable_probe(ip, parent_ip, _data);
2144 }
2145
2146 static int
2147 event_enable_print(struct seq_file *m, unsigned long ip,
2148                       struct ftrace_probe_ops *ops, void *_data)
2149 {
2150         struct event_probe_data *data = _data;
2151
2152         seq_printf(m, "%ps:", (void *)ip);
2153
2154         seq_printf(m, "%s:%s:%s",
2155                    data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2156                    data->file->event_call->class->system,
2157                    ftrace_event_name(data->file->event_call));
2158
2159         if (data->count == -1)
2160                 seq_puts(m, ":unlimited\n");
2161         else
2162                 seq_printf(m, ":count=%ld\n", data->count);
2163
2164         return 0;
2165 }
2166
2167 static int
2168 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2169                   void **_data)
2170 {
2171         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2172         struct event_probe_data *data = *pdata;
2173
2174         data->ref++;
2175         return 0;
2176 }
2177
2178 static void
2179 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2180                   void **_data)
2181 {
2182         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2183         struct event_probe_data *data = *pdata;
2184
2185         if (WARN_ON_ONCE(data->ref <= 0))
2186                 return;
2187
2188         data->ref--;
2189         if (!data->ref) {
2190                 /* Remove the SOFT_MODE flag */
2191                 __ftrace_event_enable_disable(data->file, 0, 1);
2192                 module_put(data->file->event_call->mod);
2193                 kfree(data);
2194         }
2195         *pdata = NULL;
2196 }
2197
2198 static struct ftrace_probe_ops event_enable_probe_ops = {
2199         .func                   = event_enable_probe,
2200         .print                  = event_enable_print,
2201         .init                   = event_enable_init,
2202         .free                   = event_enable_free,
2203 };
2204
2205 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2206         .func                   = event_enable_count_probe,
2207         .print                  = event_enable_print,
2208         .init                   = event_enable_init,
2209         .free                   = event_enable_free,
2210 };
2211
2212 static struct ftrace_probe_ops event_disable_probe_ops = {
2213         .func                   = event_enable_probe,
2214         .print                  = event_enable_print,
2215         .init                   = event_enable_init,
2216         .free                   = event_enable_free,
2217 };
2218
2219 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2220         .func                   = event_enable_count_probe,
2221         .print                  = event_enable_print,
2222         .init                   = event_enable_init,
2223         .free                   = event_enable_free,
2224 };
2225
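/*
 * Implement the enable_event/disable_event function-probe commands of
 * set_ftrace_filter. The format, as documented for ftrace, is
 *
 *   <function>:enable_event:<system>:<event>[:count]
 *
 * e.g. 'schedule:enable_event:sched:sched_switch' soft-enables the
 * sched_switch event whenever schedule() is traced. The optional count
 * limits how many times the probe may toggle the event, and a command
 * starting with '!' removes a previously registered probe.
 */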
2226 static int
2227 event_enable_func(struct ftrace_hash *hash,
2228                   char *glob, char *cmd, char *param, int enabled)
2229 {
2230         struct trace_array *tr = top_trace_array();
2231         struct ftrace_event_file *file;
2232         struct ftrace_probe_ops *ops;
2233         struct event_probe_data *data;
2234         const char *system;
2235         const char *event;
2236         char *number;
2237         bool enable;
2238         int ret;
2239
2240         if (!tr)
2241                 return -ENODEV;
2242
2243         /* hash funcs only work with set_ftrace_filter */
2244         if (!enabled || !param)
2245                 return -EINVAL;
2246
2247         system = strsep(&param, ":");
2248         if (!param)
2249                 return -EINVAL;
2250
2251         event = strsep(&param, ":");
2252
2253         mutex_lock(&event_mutex);
2254
2255         ret = -EINVAL;
2256         file = find_event_file(tr, system, event);
2257         if (!file)
2258                 goto out;
2259
2260         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2261
2262         if (enable)
2263                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2264         else
2265                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2266
2267         if (glob[0] == '!') {
2268                 unregister_ftrace_function_probe_func(glob+1, ops);
2269                 ret = 0;
2270                 goto out;
2271         }
2272
2273         ret = -ENOMEM;
2274         data = kzalloc(sizeof(*data), GFP_KERNEL);
2275         if (!data)
2276                 goto out;
2277
2278         data->enable = enable;
2279         data->count = -1;
2280         data->file = file;
2281
2282         if (!param)
2283                 goto out_reg;
2284
2285         number = strsep(&param, ":");
2286
2287         ret = -EINVAL;
2288         if (!strlen(number))
2289                 goto out_free;
2290
2291         /*
2292          * We use the callback data field (which is a pointer)
2293          * as our counter.
2294          */
2295         ret = kstrtoul(number, 0, &data->count);
2296         if (ret)
2297                 goto out_free;
2298
2299  out_reg:
2300         /* Don't let event modules unload while a probe is registered */
2301         ret = try_module_get(file->event_call->mod);
2302         if (!ret) {
2303                 ret = -EBUSY;
2304                 goto out_free;
2305         }
2306
2307         ret = __ftrace_event_enable_disable(file, 1, 1);
2308         if (ret < 0)
2309                 goto out_put;
2310         ret = register_ftrace_function_probe(glob, ops, data);
2311         /*
2312          * On success, the above returns the number of functions enabled,
2313          * but if it didn't find any functions it returns zero.
2314          * Consider finding no functions a failure too.
2315          */
2316         if (!ret) {
2317                 ret = -ENOENT;
2318                 goto out_disable;
2319         } else if (ret < 0)
2320                 goto out_disable;
2321         /* Just return zero, not the number of enabled functions */
2322         ret = 0;
2323  out:
2324         mutex_unlock(&event_mutex);
2325         return ret;
2326
2327  out_disable:
2328         __ftrace_event_enable_disable(file, 0, 1);
2329  out_put:
2330         module_put(file->event_call->mod);
2331  out_free:
2332         kfree(data);
2333         goto out;
2334 }
2335
2336 static struct ftrace_func_command event_enable_cmd = {
2337         .name                   = ENABLE_EVENT_STR,
2338         .func                   = event_enable_func,
2339 };
2340
2341 static struct ftrace_func_command event_disable_cmd = {
2342         .name                   = DISABLE_EVENT_STR,
2343         .func                   = event_enable_func,
2344 };
2345
2346 static __init int register_event_cmds(void)
2347 {
2348         int ret;
2349
2350         ret = register_ftrace_command(&event_enable_cmd);
2351         if (WARN_ON(ret < 0))
2352                 return ret;
2353         ret = register_ftrace_command(&event_disable_cmd);
2354         if (WARN_ON(ret < 0))
2355                 unregister_ftrace_command(&event_enable_cmd);
2356         return ret;
2357 }
2358 #else
2359 static inline int register_event_cmds(void) { return 0; }
2360 #endif /* CONFIG_DYNAMIC_FTRACE */
2361
2362 /*
2363  * The top level array has already had its ftrace_event_file
2364  * descriptors created in order to allow for early events to
2365  * be recorded. This function is called after tracefs has been
2366  * initialized, and we now have to create the files associated
2367  * with the events.
2368  */
2369 static __init void
2370 __trace_early_add_event_dirs(struct trace_array *tr)
2371 {
2372         struct ftrace_event_file *file;
2373         int ret;
2374
2375
2376         list_for_each_entry(file, &tr->events, list) {
2377                 ret = event_create_dir(tr->event_dir, file);
2378                 if (ret < 0)
2379                         pr_warn("Could not create directory for event %s\n",
2380                                 ftrace_event_name(file->event_call));
2381         }
2382 }
2383
2384 /*
2385  * For early boot up, the top trace array needs to have
2386  * a list of events that can be enabled. This must be done before
2387  * the filesystem is set up in order to allow events to be traced
2388  * early.
2389  */
2390 static __init void
2391 __trace_early_add_events(struct trace_array *tr)
2392 {
2393         struct ftrace_event_call *call;
2394         int ret;
2395
2396         list_for_each_entry(call, &ftrace_events, list) {
2397                 /* Early boot up should not have any modules loaded */
2398                 if (WARN_ON_ONCE(call->mod))
2399                         continue;
2400
2401                 ret = __trace_early_add_new_event(call, tr);
2402                 if (ret < 0)
2403                         pr_warn("Could not create early event %s\n",
2404                                 ftrace_event_name(call));
2405         }
2406 }
2407
2408 /* Remove the event directory structure for a trace directory. */
2409 static void
2410 __trace_remove_event_dirs(struct trace_array *tr)
2411 {
2412         struct ftrace_event_file *file, *next;
2413
2414         list_for_each_entry_safe(file, next, &tr->events, list)
2415                 remove_event_file_dir(file);
2416 }
2417
2418 static void __add_event_to_tracers(struct ftrace_event_call *call)
2419 {
2420         struct trace_array *tr;
2421
2422         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2423                 __trace_add_new_event(call, tr);
2424 }
2425
2426 extern struct ftrace_event_call *__start_ftrace_events[];
2427 extern struct ftrace_event_call *__stop_ftrace_events[];
2428
2429 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2430
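/*
 * Handle the trace_event= boot parameter. The value is a comma-separated
 * list of events (for example trace_event=sched:sched_switch) that is
 * saved here and enabled later by early_enable_events().
 */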
2431 static __init int setup_trace_event(char *str)
2432 {
2433         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2434         ring_buffer_expanded = true;
2435         tracing_selftest_disabled = true;
2436
2437         return 1;
2438 }
2439 __setup("trace_event=", setup_trace_event);
2440
2441 /* Expects to have event_mutex held when called */
2442 static int
2443 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2444 {
2445         struct dentry *d_events;
2446         struct dentry *entry;
2447
2448         entry = tracefs_create_file("set_event", 0644, parent,
2449                                     tr, &ftrace_set_event_fops);
2450         if (!entry) {
2451                 pr_warn("Could not create tracefs 'set_event' entry\n");
2452                 return -ENOMEM;
2453         }
2454
2455         d_events = tracefs_create_dir("events", parent);
2456         if (!d_events) {
2457                 pr_warn("Could not create tracefs 'events' directory\n");
2458                 return -ENOMEM;
2459         }
2460
2461         /* ring buffer internal formats */
2462         trace_create_file("header_page", 0444, d_events,
2463                           ring_buffer_print_page_header,
2464                           &ftrace_show_header_fops);
2465
2466         trace_create_file("header_event", 0444, d_events,
2467                           ring_buffer_print_entry_header,
2468                           &ftrace_show_header_fops);
2469
2470         trace_create_file("enable", 0644, d_events,
2471                           tr, &ftrace_tr_enable_fops);
2472
2473         tr->event_dir = d_events;
2474
2475         return 0;
2476 }
2477
2478 /**
2479  * event_trace_add_tracer - add an instance of a trace_array to events
2480  * @parent: The parent dentry to place the files/directories for events in
2481  * @tr: The trace array associated with these events
2482  *
2483  * When a new instance is created, it needs to set up its events
2484  * directory, as well as other files associated with events. It also
2485  * creates the event hierarchy in the @parent/events directory.
2486  *
2487  * Returns 0 on success.
2488  */
2489 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2490 {
2491         int ret;
2492
2493         mutex_lock(&event_mutex);
2494
2495         ret = create_event_toplevel_files(parent, tr);
2496         if (ret)
2497                 goto out_unlock;
2498
2499         down_write(&trace_event_sem);
2500         __trace_add_event_dirs(tr);
2501         up_write(&trace_event_sem);
2502
2503  out_unlock:
2504         mutex_unlock(&event_mutex);
2505
2506         return ret;
2507 }
2508
2509 /*
2510  * The top trace array already had its ftrace_event_file descriptors created.
2511  * Now the files themselves need to be created.
2512  */
2513 static __init int
2514 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2515 {
2516         int ret;
2517
2518         mutex_lock(&event_mutex);
2519
2520         ret = create_event_toplevel_files(parent, tr);
2521         if (ret)
2522                 goto out_unlock;
2523
2524         down_write(&trace_event_sem);
2525         __trace_early_add_event_dirs(tr);
2526         up_write(&trace_event_sem);
2527
2528  out_unlock:
2529         mutex_unlock(&event_mutex);
2530
2531         return ret;
2532 }
2533
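/*
 * Tear down the events of a trace array instance: clear triggers,
 * disable all events, wait for rcu_read_lock_sched() readers, then
 * remove the event directories and the events/ directory itself.
 */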
2534 int event_trace_del_tracer(struct trace_array *tr)
2535 {
2536         mutex_lock(&event_mutex);
2537
2538         /* Disable any event triggers and associated soft-disabled events */
2539         clear_event_triggers(tr);
2540
2541         /* Disable any running events */
2542         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2543
2544         /* Access to events is done within rcu_read_lock_sched() */
2545         synchronize_sched();
2546
2547         down_write(&trace_event_sem);
2548         __trace_remove_event_dirs(tr);
2549         tracefs_remove_recursive(tr->event_dir);
2550         up_write(&trace_event_sem);
2551
2552         tr->event_dir = NULL;
2553
2554         mutex_unlock(&event_mutex);
2555
2556         return 0;
2557 }
2558
2559 static __init int event_trace_memsetup(void)
2560 {
2561         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2562         file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2563         return 0;
2564 }
2565
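/*
 * Enable the events listed in the trace_event= boot parameter. When
 * @disable_first is set the events are disabled before being re-enabled,
 * which is needed to restart syscall events once pid 1 exists.
 */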
2566 static __init void
2567 early_enable_events(struct trace_array *tr, bool disable_first)
2568 {
2569         char *buf = bootup_event_buf;
2570         char *token;
2571         int ret;
2572
2573         while (true) {
2574                 token = strsep(&buf, ",");
2575
2576                 if (!token)
2577                         break;
2578                 if (!*token)
2579                         continue;
2580
2581                 /* Restarting syscalls requires that we stop them first */
2582                 if (disable_first)
2583                         ftrace_set_clr_event(tr, token, 0);
2584
2585                 ret = ftrace_set_clr_event(tr, token, 1);
2586                 if (ret)
2587                         pr_warn("Failed to enable trace event: %s\n", token);
2588
2589                 /* Put back the comma to allow this to be called again */
2590                 if (buf)
2591                         *(buf - 1) = ',';
2592         }
2593 }
2594
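/*
 * Initialize all built-in events found in the __start_ftrace_events
 * section, add them to the top trace array and enable any events that
 * were requested on the command line. Runs before the tracefs files are
 * set up, so only the descriptors are created here.
 */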
2595 static __init int event_trace_enable(void)
2596 {
2597         struct trace_array *tr = top_trace_array();
2598         struct ftrace_event_call **iter, *call;
2599         int ret;
2600
2601         if (!tr)
2602                 return -ENODEV;
2603
2604         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2605
2606                 call = *iter;
2607                 ret = event_init(call);
2608                 if (!ret)
2609                         list_add(&call->list, &ftrace_events);
2610         }
2611
2612         /*
2613          * We need the top trace array to have a working set of trace
2614          * points at early init, before the debug files and directories
2615          * are created. Create the file entries now, and attach them
2616          * to the actual file dentries later.
2617          */
2618         __trace_early_add_events(tr);
2619
2620         early_enable_events(tr, false);
2621
2622         trace_printk_start_comm();
2623
2624         register_event_cmds();
2625
2626         register_trigger_cmds();
2627
2628         return 0;
2629 }
2630
2631 /*
2632  * event_trace_enable() is called from trace_event_init() first to
2633  * initialize events and perhaps start any events that are on the
2634  * command line. Unfortunately, there are some events that will not
2635  * start this early, like the system call tracepoints that need
2636  * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
2637  * is called before pid 1 starts, so that flag is never set and the
2638  * syscall tracepoints are never reached, even though the events are
2639  * enabled (and do nothing).
2640  */
2641 static __init int event_trace_enable_again(void)
2642 {
2643         struct trace_array *tr;
2644
2645         tr = top_trace_array();
2646         if (!tr)
2647                 return -ENODEV;
2648
2649         early_enable_events(tr, true);
2650
2651         return 0;
2652 }
2653
2654 early_initcall(event_trace_enable_again);
2655
2656 static __init int event_trace_init(void)
2657 {
2658         struct trace_array *tr;
2659         struct dentry *d_tracer;
2660         struct dentry *entry;
2661         int ret;
2662
2663         tr = top_trace_array();
2664         if (!tr)
2665                 return -ENODEV;
2666
2667         d_tracer = tracing_init_dentry();
2668         if (IS_ERR(d_tracer))
2669                 return 0;
2670
2671         entry = tracefs_create_file("available_events", 0444, d_tracer,
2672                                     tr, &ftrace_avail_fops);
2673         if (!entry)
2674                 pr_warn("Could not create tracefs 'available_events' entry\n");
2675
2676         if (trace_define_common_fields())
2677                 pr_warn("tracing: Failed to allocate common fields");
2678
2679         ret = early_event_add_tracer(d_tracer, tr);
2680         if (ret)
2681                 return ret;
2682
2683 #ifdef CONFIG_MODULES
2684         ret = register_module_notifier(&trace_module_nb);
2685         if (ret)
2686                 pr_warn("Failed to register trace events module notifier\n");
2687 #endif
2688         return 0;
2689 }
2690
2691 void __init trace_event_init(void)
2692 {
2693         event_trace_memsetup();
2694         init_ftrace_syscalls();
2695         event_trace_enable();
2696 }
2697
2698 fs_initcall(event_trace_init);
2699
2700 #ifdef CONFIG_FTRACE_STARTUP_TEST
2701
2702 static DEFINE_SPINLOCK(test_spinlock);
2703 static DEFINE_SPINLOCK(test_spinlock_irq);
2704 static DEFINE_MUTEX(test_mutex);
2705
2706 static __init void test_work(struct work_struct *dummy)
2707 {
2708         spin_lock(&test_spinlock);
2709         spin_lock_irq(&test_spinlock_irq);
2710         udelay(1);
2711         spin_unlock_irq(&test_spinlock_irq);
2712         spin_unlock(&test_spinlock);
2713
2714         mutex_lock(&test_mutex);
2715         msleep(1);
2716         mutex_unlock(&test_mutex);
2717 }
2718
2719 static __init int event_test_thread(void *unused)
2720 {
2721         void *test_malloc;
2722
2723         test_malloc = kmalloc(1234, GFP_KERNEL);
2724         if (!test_malloc)
2725                 pr_info("failed to kmalloc\n");
2726
2727         schedule_on_each_cpu(test_work);
2728
2729         kfree(test_malloc);
2730
2731         set_current_state(TASK_INTERRUPTIBLE);
2732         while (!kthread_should_stop()) {
2733                 schedule();
2734                 set_current_state(TASK_INTERRUPTIBLE);
2735         }
2736         __set_current_state(TASK_RUNNING);
2737
2738         return 0;
2739 }
2740
2741 /*
2742  * Do various things that may trigger events.
2743  */
2744 static __init void event_test_stuff(void)
2745 {
2746         struct task_struct *test_thread;
2747
2748         test_thread = kthread_run(event_test_thread, NULL, "test-events");
2749         msleep(1);
2750         kthread_stop(test_thread);
2751 }
2752
2753 /*
2754  * For every trace event defined, we will test each tracepoint separately,
2755  * then by group (system), and finally all tracepoints together.
2756  */
2757 static __init void event_trace_self_tests(void)
2758 {
2759         struct ftrace_subsystem_dir *dir;
2760         struct ftrace_event_file *file;
2761         struct ftrace_event_call *call;
2762         struct event_subsystem *system;
2763         struct trace_array *tr;
2764         int ret;
2765
2766         tr = top_trace_array();
2767         if (!tr)
2768                 return;
2769
2770         pr_info("Running tests on trace events:\n");
2771
2772         list_for_each_entry(file, &tr->events, list) {
2773
2774                 call = file->event_call;
2775
2776                 /* Only test those that have a probe */
2777                 if (!call->class || !call->class->probe)
2778                         continue;
2779
2780 /*
2781  * Testing syscall events here is pretty useless, but
2782  * we still do it if configured. It is time consuming, though;
2783  * what we really need is a user thread to perform the
2784  * syscalls as we test.
2785  */
2786 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2787                 if (call->class->system &&
2788                     strcmp(call->class->system, "syscalls") == 0)
2789                         continue;
2790 #endif
2791
2792                 pr_info("Testing event %s: ", ftrace_event_name(call));
2793
2794                 /*
2795                  * If an event is already enabled, someone is using
2796                  * it and the self test should not be on.
2797                  */
2798                 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2799                         pr_warn("Enabled event during self test!\n");
2800                         WARN_ON_ONCE(1);
2801                         continue;
2802                 }
2803
2804                 ftrace_event_enable_disable(file, 1);
2805                 event_test_stuff();
2806                 ftrace_event_enable_disable(file, 0);
2807
2808                 pr_cont("OK\n");
2809         }
2810
2811         /* Now test at the sub system level */
2812
2813         pr_info("Running tests on trace event systems:\n");
2814
2815         list_for_each_entry(dir, &tr->systems, list) {
2816
2817                 system = dir->subsystem;
2818
2819                 /* the ftrace system is special, skip it */
2820                 if (strcmp(system->name, "ftrace") == 0)
2821                         continue;
2822
2823                 pr_info("Testing event system %s: ", system->name);
2824
2825                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2826                 if (WARN_ON_ONCE(ret)) {
2827                         pr_warn("error enabling system %s\n",
2828                                 system->name);
2829                         continue;
2830                 }
2831
2832                 event_test_stuff();
2833
2834                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2835                 if (WARN_ON_ONCE(ret)) {
2836                         pr_warn("error disabling system %s\n",
2837                                 system->name);
2838                         continue;
2839                 }
2840
2841                 pr_cont("OK\n");
2842         }
2843
2844         /* Test with all events enabled */
2845
2846         pr_info("Running tests on all trace events:\n");
2847         pr_info("Testing all events: ");
2848
2849         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2850         if (WARN_ON_ONCE(ret)) {
2851                 pr_warn("error enabling all events\n");
2852                 return;
2853         }
2854
2855         event_test_stuff();
2856
2857         /* Disable all events again */
2858         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2859         if (WARN_ON_ONCE(ret)) {
2860                 pr_warn("error disabling all events\n");
2861                 return;
2862         }
2863
2864         pr_cont("OK\n");
2865 }
2866
2867 #ifdef CONFIG_FUNCTION_TRACER
2868
2869 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2870
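/*
 * Function tracer callback used by the self tests: record a TRACE_FN
 * entry for every traced function, with a per-CPU recursion guard.
 */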
2871 static void
2872 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2873                           struct ftrace_ops *op, struct pt_regs *pt_regs)
2874 {
2875         struct ring_buffer_event *event;
2876         struct ring_buffer *buffer;
2877         struct ftrace_entry *entry;
2878         unsigned long flags;
2879         long disabled;
2880         int cpu;
2881         int pc;
2882
2883         pc = preempt_count();
2884         preempt_disable_notrace();
2885         cpu = raw_smp_processor_id();
2886         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2887
2888         if (disabled != 1)
2889                 goto out;
2890
2891         local_save_flags(flags);
2892
2893         event = trace_current_buffer_lock_reserve(&buffer,
2894                                                   TRACE_FN, sizeof(*entry),
2895                                                   flags, pc);
2896         if (!event)
2897                 goto out;
2898         entry   = ring_buffer_event_data(event);
2899         entry->ip                       = ip;
2900         entry->parent_ip                = parent_ip;
2901
2902         trace_buffer_unlock_commit(buffer, event, flags, pc);
2903
2904  out:
2905         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2906         preempt_enable_notrace();
2907 }
2908
2909 static struct ftrace_ops trace_ops __initdata  =
2910 {
2911         .func = function_test_events_call,
2912         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2913 };
2914
2915 static __init void event_trace_self_test_with_function(void)
2916 {
2917         int ret;
2918         ret = register_ftrace_function(&trace_ops);
2919         if (WARN_ON(ret < 0)) {
2920                 pr_info("Failed to enable function tracer for event tests\n");
2921                 return;
2922         }
2923         pr_info("Running tests again, along with the function tracer\n");
2924         event_trace_self_tests();
2925         unregister_ftrace_function(&trace_ops);
2926 }
2927 #else
2928 static __init void event_trace_self_test_with_function(void)
2929 {
2930 }
2931 #endif
2932
2933 static __init int event_trace_self_tests_init(void)
2934 {
2935         if (!tracing_selftest_disabled) {
2936                 event_trace_self_tests();
2937                 event_trace_self_test_with_function();
2938         }
2939
2940         return 0;
2941 }
2942
2943 late_initcall(event_trace_self_tests_init);
2944
2945 #endif