These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / kernel / trace / trace_events.c
index 711529b..0e508e9 100644 (file)
 #include <linux/kthread.h>
 #include <linux/tracefs.h>
 #include <linux/uaccess.h>
+#include <linux/bsearch.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
+#include <linux/sort.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 
+#include <trace/events/sched.h>
+
 #include <asm/setup.h>
 
 #include "trace_output.h"
@@ -30,6 +34,7 @@
 DEFINE_MUTEX(event_mutex);
 
 LIST_HEAD(ftrace_events);
+static LIST_HEAD(ftrace_generic_fields);
 static LIST_HEAD(ftrace_common_fields);
 
 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
@@ -37,21 +42,19 @@ static LIST_HEAD(ftrace_common_fields);
 static struct kmem_cache *field_cachep;
 static struct kmem_cache *file_cachep;
 
-#define SYSTEM_FL_FREE_NAME            (1 << 31)
-
 static inline int system_refcount(struct event_subsystem *system)
 {
-       return system->ref_count & ~SYSTEM_FL_FREE_NAME;
+       return system->ref_count;
 }
 
 static int system_refcount_inc(struct event_subsystem *system)
 {
-       return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
+       return system->ref_count++;
 }
 
 static int system_refcount_dec(struct event_subsystem *system)
 {
-       return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
+       return --system->ref_count;
 }
 
 /* Double loops, do not use break, only goto's work */
@@ -61,14 +64,14 @@ static int system_refcount_dec(struct event_subsystem *system)
 
 #define do_for_each_event_file_safe(tr, file)                  \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
-               struct ftrace_event_file *___n;                         \
+               struct trace_event_file *___n;                          \
                list_for_each_entry_safe(file, ___n, &tr->events, list)
 
 #define while_for_each_event_file()            \
        }
 
 static struct list_head *
-trace_get_fields(struct ftrace_event_call *event_call)
+trace_get_fields(struct trace_event_call *event_call)
 {
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
@@ -89,17 +92,21 @@ __find_event_field(struct list_head *head, char *name)
 }
 
 struct ftrace_event_field *
-trace_find_event_field(struct ftrace_event_call *call, char *name)
+trace_find_event_field(struct trace_event_call *call, char *name)
 {
        struct ftrace_event_field *field;
        struct list_head *head;
 
-       field = __find_event_field(&ftrace_common_fields, name);
+       head = trace_get_fields(call);
+       field = __find_event_field(head, name);
        if (field)
                return field;
 
-       head = trace_get_fields(call);
-       return __find_event_field(head, name);
+       field = __find_event_field(&ftrace_generic_fields, name);
+       if (field)
+               return field;
+
+       return __find_event_field(&ftrace_common_fields, name);
 }
 
 static int __trace_define_field(struct list_head *head, const char *type,
@@ -129,7 +136,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
        return 0;
 }
 
-int trace_define_field(struct ftrace_event_call *call, const char *type,
+int trace_define_field(struct trace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
 {
@@ -144,6 +151,13 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
 }
 EXPORT_SYMBOL_GPL(trace_define_field);
 
+#define __generic_field(type, item, filter_type)                       \
+       ret = __trace_define_field(&ftrace_generic_fields, #type,       \
+                                  #item, 0, 0, is_signed_type(type),   \
+                                  filter_type);                        \
+       if (ret)                                                        \
+               return ret;
+
 #define __common_field(type, item)                                     \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
@@ -153,6 +167,18 @@ EXPORT_SYMBOL_GPL(trace_define_field);
        if (ret)                                                        \
                return ret;
 
+static int trace_define_generic_fields(void)
+{
+       int ret;
+
+       __generic_field(int, CPU, FILTER_CPU);
+       __generic_field(int, cpu, FILTER_CPU);
+       __generic_field(char *, COMM, FILTER_COMM);
+       __generic_field(char *, comm, FILTER_COMM);
+
+       return ret;
+}
+
 static int trace_define_common_fields(void)
 {
        int ret;
@@ -168,7 +194,7 @@ static int trace_define_common_fields(void)
        return ret;
 }
 
-static void trace_destroy_fields(struct ftrace_event_call *call)
+static void trace_destroy_fields(struct trace_event_call *call)
 {
        struct ftrace_event_field *field, *next;
        struct list_head *head;
@@ -180,11 +206,11 @@ static void trace_destroy_fields(struct ftrace_event_call *call)
        }
 }
 
-int trace_event_raw_init(struct ftrace_event_call *call)
+int trace_event_raw_init(struct trace_event_call *call)
 {
        int id;
 
-       id = register_ftrace_event(&call->event);
+       id = register_trace_event(&call->event);
        if (!id)
                return -ENODEV;
 
@@ -192,18 +218,38 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
-                                 struct ftrace_event_file *ftrace_file,
-                                 unsigned long len)
+bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
+{
+       struct trace_array *tr = trace_file->tr;
+       struct trace_array_cpu *data;
+       struct trace_pid_list *pid_list;
+
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       if (!pid_list)
+               return false;
+
+       data = this_cpu_ptr(tr->trace_buffer.data);
+
+       return data->ignore_pid;
+}
+EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
+
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+                                struct trace_event_file *trace_file,
+                                unsigned long len)
 {
-       struct ftrace_event_call *event_call = ftrace_file->event_call;
+       struct trace_event_call *event_call = trace_file->event_call;
+
+       if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
+           trace_event_ignore_this_pid(trace_file))
+               return NULL;
 
        local_save_flags(fbuffer->flags);
        fbuffer->pc = preempt_count();
-       fbuffer->ftrace_file = ftrace_file;
+       fbuffer->trace_file = trace_file;
 
        fbuffer->event =
-               trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
+               trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
                                                event_call->event.type, len,
                                                fbuffer->flags, fbuffer->pc);
        if (!fbuffer->event)
@@ -212,13 +258,13 @@ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
        fbuffer->entry = ring_buffer_event_data(fbuffer->event);
        return fbuffer->entry;
 }
-EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
 
 static DEFINE_SPINLOCK(tracepoint_iter_lock);
 
-static void output_printk(struct ftrace_event_buffer *fbuffer)
+static void output_printk(struct trace_event_buffer *fbuffer)
 {
-       struct ftrace_event_call *event_call;
+       struct trace_event_call *event_call;
        struct trace_event *event;
        unsigned long flags;
        struct trace_iterator *iter = tracepoint_print_iter;
@@ -226,12 +272,12 @@ static void output_printk(struct ftrace_event_buffer *fbuffer)
        if (!iter)
                return;
 
-       event_call = fbuffer->ftrace_file->event_call;
+       event_call = fbuffer->trace_file->event_call;
        if (!event_call || !event_call->event.funcs ||
            !event_call->event.funcs->trace)
                return;
 
-       event = &fbuffer->ftrace_file->event_call->event;
+       event = &fbuffer->trace_file->event_call->event;
 
        spin_lock_irqsave(&tracepoint_iter_lock, flags);
        trace_seq_init(&iter->seq);
@@ -243,21 +289,21 @@ static void output_printk(struct ftrace_event_buffer *fbuffer)
        spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
 }
 
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 {
        if (tracepoint_printk)
                output_printk(fbuffer);
 
-       event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
+       event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
                                    fbuffer->event, fbuffer->entry,
                                    fbuffer->flags, fbuffer->pc);
 }
-EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
+EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
-int ftrace_event_reg(struct ftrace_event_call *call,
-                    enum trace_reg type, void *data)
+int trace_event_reg(struct trace_event_call *call,
+                   enum trace_reg type, void *data)
 {
-       struct ftrace_event_file *file = data;
+       struct trace_event_file *file = data;
 
        WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
        switch (type) {
@@ -290,34 +336,35 @@ int ftrace_event_reg(struct ftrace_event_call *call,
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(ftrace_event_reg);
+EXPORT_SYMBOL_GPL(trace_event_reg);
 
 void trace_event_enable_cmd_record(bool enable)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        struct trace_array *tr;
 
        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {
 
-               if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
+               if (!(file->flags & EVENT_FILE_FL_ENABLED))
                        continue;
 
                if (enable) {
                        tracing_start_cmdline_record();
-                       set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+                       set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                } else {
                        tracing_stop_cmdline_record();
-                       clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+                       clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
 }
 
-static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
+static int __ftrace_event_enable_disable(struct trace_event_file *file,
                                         int enable, int soft_disable)
 {
-       struct ftrace_event_call *call = file->event_call;
+       struct trace_event_call *call = file->event_call;
+       struct trace_array *tr = file->tr;
        int ret = 0;
        int disable;
 
@@ -339,24 +386,24 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                if (soft_disable) {
                        if (atomic_dec_return(&file->sm_ref) > 0)
                                break;
-                       disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
-                       clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+                       disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
+                       clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                } else
-                       disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
+                       disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
 
-               if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
-                       clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
-                       if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
+               if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
+                       clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
+                       if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
-                               clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+                               clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER, file);
                }
                /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
-               if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
-                       set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+               if (file->flags & EVENT_FILE_FL_SOFT_MODE)
+                       set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else
-                       clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+                       clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                break;
        case 1:
                /*
@@ -368,31 +415,31 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                 * it still seems to be disabled.
                 */
                if (!soft_disable)
-                       clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+                       clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else {
                        if (atomic_inc_return(&file->sm_ref) > 1)
                                break;
-                       set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+                       set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                }
 
-               if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
+               if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
 
                        /* Keep the event disabled, when going to SOFT_MODE. */
                        if (soft_disable)
-                               set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+                               set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
 
-                       if (trace_flags & TRACE_ITER_RECORD_CMD) {
+                       if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
-                               set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+                               set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER, file);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event "
-                                       "%s\n", ftrace_event_name(call));
+                                       "%s\n", trace_event_name(call));
                                break;
                        }
-                       set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
+                       set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
 
                        /* WAS_ENABLED gets set but never cleared. */
                        call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
@@ -403,13 +450,13 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
        return ret;
 }
 
-int trace_event_enable_disable(struct ftrace_event_file *file,
+int trace_event_enable_disable(struct trace_event_file *file,
                               int enable, int soft_disable)
 {
        return __ftrace_event_enable_disable(file, enable, soft_disable);
 }
 
-static int ftrace_event_enable_disable(struct ftrace_event_file *file,
+static int ftrace_event_enable_disable(struct trace_event_file *file,
                                       int enable)
 {
        return __ftrace_event_enable_disable(file, enable, 0);
@@ -417,7 +464,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file,
 
 static void ftrace_clear_events(struct trace_array *tr)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
 
        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
@@ -426,6 +473,148 @@ static void ftrace_clear_events(struct trace_array *tr)
        mutex_unlock(&event_mutex);
 }
 
+static int cmp_pid(const void *key, const void *elt)
+{
+       const pid_t *search_pid = key;
+       const pid_t *pid = elt;
+
+       if (*search_pid == *pid)
+               return 0;
+       if (*search_pid < *pid)
+               return -1;
+       return 1;
+}
+
+static bool
+check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
+{
+       pid_t search_pid;
+       pid_t *pid;
+
+       /*
+        * Return false, because if filtered_pids does not exist,
+        * all pids are good to trace.
+        */
+       if (!filtered_pids)
+               return false;
+
+       search_pid = task->pid;
+
+       pid = bsearch(&search_pid, filtered_pids->pids,
+                     filtered_pids->nr_pids, sizeof(pid_t),
+                     cmp_pid);
+       if (!pid)
+               return true;
+
+       return false;
+}
+
+static void
+event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+{
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, prev) &&
+                      check_ignore_pid(pid_list, next));
+}
+
+static void
+event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+{
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, next));
+}
+
+static void
+event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
+{
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+
+       /* Nothing to do if we are already tracing */
+       if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, task));
+}
+
+static void
+event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
+{
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+
+       /* Nothing to do if we are not tracing */
+       if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+       /* Set tracing if current is enabled */
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, current));
+}
+
+static void __ftrace_clear_event_pids(struct trace_array *tr)
+{
+       struct trace_pid_list *pid_list;
+       struct trace_event_file *file;
+       int cpu;
+
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            lockdep_is_held(&event_mutex));
+       if (!pid_list)
+               return;
+
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
+
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
+
+       unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
+
+       unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
+
+       list_for_each_entry(file, &tr->events, list) {
+               clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+       }
+
+       for_each_possible_cpu(cpu)
+               per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+
+       rcu_assign_pointer(tr->filtered_pids, NULL);
+
+       /* Wait till all users are no longer using pid filtering */
+       synchronize_sched();
+
+       free_pages((unsigned long)pid_list->pids, pid_list->order);
+       kfree(pid_list);
+}
+
+static void ftrace_clear_event_pids(struct trace_array *tr)
+{
+       mutex_lock(&event_mutex);
+       __ftrace_clear_event_pids(tr);
+       mutex_unlock(&event_mutex);
+}
+
 static void __put_system(struct event_subsystem *system)
 {
        struct event_filter *filter = system->filter;
@@ -440,8 +629,7 @@ static void __put_system(struct event_subsystem *system)
                kfree(filter->filter_string);
                kfree(filter);
        }
-       if (system->ref_count & SYSTEM_FL_FREE_NAME)
-               kfree(system->name);
+       kfree_const(system->name);
        kfree(system);
 }
 
@@ -451,14 +639,14 @@ static void __get_system(struct event_subsystem *system)
        system_refcount_inc(system);
 }
 
-static void __get_system_dir(struct ftrace_subsystem_dir *dir)
+static void __get_system_dir(struct trace_subsystem_dir *dir)
 {
        WARN_ON_ONCE(dir->ref_count == 0);
        dir->ref_count++;
        __get_system(dir->subsystem);
 }
 
-static void __put_system_dir(struct ftrace_subsystem_dir *dir)
+static void __put_system_dir(struct trace_subsystem_dir *dir)
 {
        WARN_ON_ONCE(dir->ref_count == 0);
        /* If the subsystem is about to be freed, the dir must be too */
@@ -469,14 +657,14 @@ static void __put_system_dir(struct ftrace_subsystem_dir *dir)
                kfree(dir);
 }
 
-static void put_system(struct ftrace_subsystem_dir *dir)
+static void put_system(struct trace_subsystem_dir *dir)
 {
        mutex_lock(&event_mutex);
        __put_system_dir(dir);
        mutex_unlock(&event_mutex);
 }
 
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
+static void remove_subsystem(struct trace_subsystem_dir *dir)
 {
        if (!dir)
                return;
@@ -488,7 +676,7 @@ static void remove_subsystem(struct ftrace_subsystem_dir *dir)
        }
 }
 
-static void remove_event_file_dir(struct ftrace_event_file *file)
+static void remove_event_file_dir(struct trace_event_file *file)
 {
        struct dentry *dir = file->dir;
        struct dentry *child;
@@ -517,15 +705,15 @@ static int
 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
                              const char *sub, const char *event, int set)
 {
-       struct ftrace_event_file *file;
-       struct ftrace_event_call *call;
+       struct trace_event_file *file;
+       struct trace_event_call *call;
        const char *name;
        int ret = -EINVAL;
 
        list_for_each_entry(file, &tr->events, list) {
 
                call = file->event_call;
-               name = ftrace_event_name(call);
+               name = trace_event_name(call);
 
                if (!name || !call->class || !call->class->reg)
                        continue;
@@ -673,8 +861,8 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct ftrace_event_file *file = v;
-       struct ftrace_event_call *call;
+       struct trace_event_file *file = v;
+       struct trace_event_call *call;
        struct trace_array *tr = m->private;
 
        (*pos)++;
@@ -685,7 +873,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
-               if (call->class && call->class->reg)
+               if (call->class && call->class->reg &&
+                   !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                        return file;
        }
 
@@ -694,13 +883,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;
 
        mutex_lock(&event_mutex);
 
-       file = list_entry(&tr->events, struct ftrace_event_file, list);
+       file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = t_next(m, file, &l);
                if (!file)
@@ -712,13 +901,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct ftrace_event_file *file = v;
+       struct trace_event_file *file = v;
        struct trace_array *tr = m->private;
 
        (*pos)++;
 
        list_for_each_entry_continue(file, &tr->events, list) {
-               if (file->flags & FTRACE_EVENT_FL_ENABLED)
+               if (file->flags & EVENT_FILE_FL_ENABLED)
                        return file;
        }
 
@@ -727,13 +916,13 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;
 
        mutex_lock(&event_mutex);
 
-       file = list_entry(&tr->events, struct ftrace_event_file, list);
+       file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = s_next(m, file, &l);
                if (!file)
@@ -744,12 +933,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 static int t_show(struct seq_file *m, void *v)
 {
-       struct ftrace_event_file *file = v;
-       struct ftrace_event_call *call = file->event_call;
+       struct trace_event_file *file = v;
+       struct trace_event_call *call = file->event_call;
 
        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
-       seq_printf(m, "%s\n", ftrace_event_name(call));
+       seq_printf(m, "%s\n", trace_event_name(call));
 
        return 0;
 }
@@ -759,11 +948,63 @@ static void t_stop(struct seq_file *m, void *p)
        mutex_unlock(&event_mutex);
 }
 
+static void *p_start(struct seq_file *m, loff_t *pos)
+       __acquires(RCU)
+{
+       struct trace_pid_list *pid_list;
+       struct trace_array *tr = m->private;
+
+       /*
+        * Grab the mutex, to keep calls to p_next() having the same
+        * tr->filtered_pids as p_start() has.
+        * If we just passed the tr->filtered_pids around, then RCU would
+        * have been enough, but doing that makes things more complex.
+        */
+       mutex_lock(&event_mutex);
+       rcu_read_lock_sched();
+
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+       if (!pid_list || *pos >= pid_list->nr_pids)
+               return NULL;
+
+       return (void *)&pid_list->pids[*pos];
+}
+
+static void p_stop(struct seq_file *m, void *p)
+       __releases(RCU)
+{
+       rcu_read_unlock_sched();
+       mutex_unlock(&event_mutex);
+}
+
+static void *
+p_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct trace_array *tr = m->private;
+       struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+       (*pos)++;
+
+       if (*pos >= pid_list->nr_pids)
+               return NULL;
+
+       return (void *)&pid_list->pids[*pos];
+}
+
+static int p_show(struct seq_file *m, void *v)
+{
+       pid_t *pid = v;
+
+       seq_printf(m, "%d\n", *pid);
+       return 0;
+}
+
 static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        unsigned long flags;
        char buf[4] = "0";
 
@@ -776,12 +1017,12 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        if (!file)
                return -ENODEV;
 
-       if (flags & FTRACE_EVENT_FL_ENABLED &&
-           !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+       if (flags & EVENT_FILE_FL_ENABLED &&
+           !(flags & EVENT_FILE_FL_SOFT_DISABLED))
                strcpy(buf, "1");
 
-       if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
-           flags & FTRACE_EVENT_FL_SOFT_MODE)
+       if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
+           flags & EVENT_FILE_FL_SOFT_MODE)
                strcat(buf, "*");
 
        strcat(buf, "\n");
@@ -793,7 +1034,7 @@ static ssize_t
 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        unsigned long val;
        int ret;
 
@@ -830,10 +1071,10 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
        const char set_to_char[4] = { '?', '0', '1', 'X' };
-       struct ftrace_subsystem_dir *dir = filp->private_data;
+       struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
-       struct ftrace_event_call *call;
-       struct ftrace_event_file *file;
+       struct trace_event_call *call;
+       struct trace_event_file *file;
        struct trace_array *tr = dir->tr;
        char buf[2];
        int set = 0;
@@ -842,7 +1083,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
-               if (!ftrace_event_name(call) || !call->class || !call->class->reg)
+               if (!trace_event_name(call) || !call->class || !call->class->reg)
                        continue;
 
                if (system && strcmp(call->class->system, system->name) != 0)
@@ -853,7 +1094,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                 * or if all events or cleared, or if we have
                 * a mixture.
                 */
-               set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
+               set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
 
                /*
                 * If we have a mixture, no need to look further.
@@ -875,7 +1116,7 @@ static ssize_t
 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
 {
-       struct ftrace_subsystem_dir *dir = filp->private_data;
+       struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        const char *name = NULL;
        unsigned long val;
@@ -919,7 +1160,7 @@ enum {
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct ftrace_event_call *call = event_file_data(m->private);
+       struct trace_event_call *call = event_file_data(m->private);
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);
        struct list_head *node = v;
@@ -951,13 +1192,13 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 
 static int f_show(struct seq_file *m, void *v)
 {
-       struct ftrace_event_call *call = event_file_data(m->private);
+       struct trace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;
 
        switch ((unsigned long)v) {
        case FORMAT_HEADER:
-               seq_printf(m, "name: %s\n", ftrace_event_name(call));
+               seq_printf(m, "name: %s\n", trace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_puts(m, "format:\n");
                return 0;
@@ -1064,7 +1305,7 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        struct trace_seq *s;
        int r = -ENODEV;
 
@@ -1097,7 +1338,7 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        char *buf;
        int err = -ENODEV;
 
@@ -1134,7 +1375,7 @@ static LIST_HEAD(event_subsystems);
 static int subsystem_open(struct inode *inode, struct file *filp)
 {
        struct event_subsystem *system = NULL;
-       struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
+       struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
        struct trace_array *tr;
        int ret;
 
@@ -1183,7 +1424,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
 
 static int system_tr_open(struct inode *inode, struct file *filp)
 {
-       struct ftrace_subsystem_dir *dir;
+       struct trace_subsystem_dir *dir;
        struct trace_array *tr = inode->i_private;
        int ret;
 
@@ -1216,7 +1457,7 @@ static int system_tr_open(struct inode *inode, struct file *filp)
 
 static int subsystem_release(struct inode *inode, struct file *file)
 {
-       struct ftrace_subsystem_dir *dir = file->private_data;
+       struct trace_subsystem_dir *dir = file->private_data;
 
        trace_array_put(dir->tr);
 
@@ -1237,7 +1478,7 @@ static ssize_t
 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
 {
-       struct ftrace_subsystem_dir *dir = filp->private_data;
+       struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_seq *s;
        int r;
@@ -1264,7 +1505,7 @@ static ssize_t
 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
 {
-       struct ftrace_subsystem_dir *dir = filp->private_data;
+       struct trace_subsystem_dir *dir = filp->private_data;
        char *buf;
        int err;
 
@@ -1316,8 +1557,219 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
        return r;
 }
 
+static int max_pids(struct trace_pid_list *pid_list)
+{
+       return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
+}
+
+static void ignore_task_cpu(void *data)
+{
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+
+       /*
+        * This function is called by on_each_cpu() while the
+        * event_mutex is held.
+        */
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            mutex_is_locked(&event_mutex));
+
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, current));
+}
+
+static ssize_t
+ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       struct seq_file *m = filp->private_data;
+       struct trace_array *tr = m->private;
+       struct trace_pid_list *filtered_pids = NULL;
+       struct trace_pid_list *pid_list = NULL;
+       struct trace_event_file *file;
+       struct trace_parser parser;
+       unsigned long val;
+       loff_t this_pos;
+       ssize_t read = 0;
+       ssize_t ret = 0;
+       pid_t pid;
+       int i;
+
+       if (!cnt)
+               return 0;
+
+       ret = tracing_update_buffers();
+       if (ret < 0)
+               return ret;
+
+       if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
+               return -ENOMEM;
+
+       mutex_lock(&event_mutex);
+       /*
+        * Load as many pids into the array before doing a
+        * swap from the tr->filtered_pids to the new list.
+        */
+       while (cnt > 0) {
+
+               this_pos = 0;
+
+               ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
+               if (ret < 0 || !trace_parser_loaded(&parser))
+                       break;
+
+               read += ret;
+               ubuf += ret;
+               cnt -= ret;
+
+               parser.buffer[parser.idx] = 0;
+
+               ret = -EINVAL;
+               if (kstrtoul(parser.buffer, 0, &val))
+                       break;
+               if (val > INT_MAX)
+                       break;
+
+               pid = (pid_t)val;
+
+               ret = -ENOMEM;
+               if (!pid_list) {
+                       pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
+                       if (!pid_list)
+                               break;
+
+                       filtered_pids = rcu_dereference_protected(tr->filtered_pids,
+                                                       lockdep_is_held(&event_mutex));
+                       if (filtered_pids)
+                               pid_list->order = filtered_pids->order;
+                       else
+                               pid_list->order = 0;
+
+                       pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
+                                                                 pid_list->order);
+                       if (!pid_list->pids)
+                               break;
+
+                       if (filtered_pids) {
+                               pid_list->nr_pids = filtered_pids->nr_pids;
+                               memcpy(pid_list->pids, filtered_pids->pids,
+                                      pid_list->nr_pids * sizeof(pid_t));
+                       } else
+                               pid_list->nr_pids = 0;
+               }
+
+               if (pid_list->nr_pids >= max_pids(pid_list)) {
+                       pid_t *pid_page;
+
+                       pid_page = (void *)__get_free_pages(GFP_KERNEL,
+                                                           pid_list->order + 1);
+                       if (!pid_page)
+                               break;
+                       memcpy(pid_page, pid_list->pids,
+                              pid_list->nr_pids * sizeof(pid_t));
+                       free_pages((unsigned long)pid_list->pids, pid_list->order);
+
+                       pid_list->order++;
+                       pid_list->pids = pid_page;
+               }
+
+               pid_list->pids[pid_list->nr_pids++] = pid;
+               trace_parser_clear(&parser);
+               ret = 0;
+       }
+       trace_parser_put(&parser);
+
+       if (ret < 0) {
+               if (pid_list)
+                       free_pages((unsigned long)pid_list->pids, pid_list->order);
+               kfree(pid_list);
+               mutex_unlock(&event_mutex);
+               return ret;
+       }
+
+       if (!pid_list) {
+               mutex_unlock(&event_mutex);
+               return ret;
+       }
+
+       sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);
+
+       /* Remove duplicates */
+       for (i = 1; i < pid_list->nr_pids; i++) {
+               int start = i;
+
+               while (i < pid_list->nr_pids &&
+                      pid_list->pids[i - 1] == pid_list->pids[i])
+                       i++;
+
+               if (start != i) {
+                       if (i < pid_list->nr_pids) {
+                               memmove(&pid_list->pids[start], &pid_list->pids[i],
+                                       (pid_list->nr_pids - i) * sizeof(pid_t));
+                               pid_list->nr_pids -= i - start;
+                               i = start;
+                       } else
+                               pid_list->nr_pids = start;
+               }
+       }
+
+       rcu_assign_pointer(tr->filtered_pids, pid_list);
+
+       list_for_each_entry(file, &tr->events, list) {
+               set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+       }
+
+       if (filtered_pids) {
+               synchronize_sched();
+
+               free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
+               kfree(filtered_pids);
+       } else {
+               /*
+                * Register a probe that is called before all other probes
+                * to set ignore_pid if next or prev do not match.
+        * Register a probe that is called after all other probes
+                * to only keep ignore_pid set if next pid matches.
+                */
+               register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
+                                                tr, INT_MAX);
+               register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
+                                                tr, 0);
+
+               register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
+                                                tr, INT_MAX);
+               register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
+                                                tr, 0);
+
+               register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
+                                                    tr, INT_MAX);
+               register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
+                                                    tr, 0);
+
+               register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
+                                                tr, INT_MAX);
+               register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
+                                                tr, 0);
+       }
+
+       /*
+        * Ignoring of pids is done at task switch. But we have to
+        * check for those tasks that are currently running.
+        * Always do this in case a pid was appended or removed.
+        */
+       on_each_cpu(ignore_task_cpu, tr, 1);
+
+       mutex_unlock(&event_mutex);
+
+       ret = read;
+       *ppos += read;
+
+       return ret;
+}
+
 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
 static int ftrace_event_set_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
 static int ftrace_event_release(struct inode *inode, struct file *file);
 
 static const struct seq_operations show_event_seq_ops = {
@@ -1334,6 +1786,13 @@ static const struct seq_operations show_set_event_seq_ops = {
        .stop = t_stop,
 };
 
+static const struct seq_operations show_set_pid_seq_ops = {
+       .start = p_start,
+       .next = p_next,
+       .show = p_show,
+       .stop = p_stop,
+};
+
 static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_avail_open,
        .read = seq_read,
@@ -1349,6 +1808,14 @@ static const struct file_operations ftrace_set_event_fops = {
        .release = ftrace_event_release,
 };
 
+static const struct file_operations ftrace_set_event_pid_fops = {
+       .open = ftrace_event_set_pid_open,
+       .read = seq_read,
+       .write = ftrace_event_pid_write,
+       .llseek = seq_lseek,
+       .release = ftrace_event_release,
+};
+
 static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
@@ -1459,6 +1926,26 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
        return ret;
 }
 
+static int
+ftrace_event_set_pid_open(struct inode *inode, struct file *file)
+{
+       const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
+       struct trace_array *tr = inode->i_private;
+       int ret;
+
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
+       if ((file->f_mode & FMODE_WRITE) &&
+           (file->f_flags & O_TRUNC))
+               ftrace_clear_event_pids(tr);
+
+       ret = ftrace_event_open(inode, file, seq_ops);
+       if (ret < 0)
+               trace_array_put(tr);
+       return ret;
+}
+
 static struct event_subsystem *
 create_new_subsystem(const char *name)
 {
@@ -1472,13 +1959,9 @@ create_new_subsystem(const char *name)
        system->ref_count = 1;
 
        /* Only allocate if dynamic (kprobes and modules) */
-       if (!core_kernel_data((unsigned long)name)) {
-               system->ref_count |= SYSTEM_FL_FREE_NAME;
-               system->name = kstrdup(name, GFP_KERNEL);
-               if (!system->name)
-                       goto out_free;
-       } else
-               system->name = name;
+       system->name = kstrdup_const(name, GFP_KERNEL);
+       if (!system->name)
+               goto out_free;
 
        system->filter = NULL;
 
@@ -1491,17 +1974,16 @@ create_new_subsystem(const char *name)
        return system;
 
  out_free:
-       if (system->ref_count & SYSTEM_FL_FREE_NAME)
-               kfree(system->name);
+       kfree_const(system->name);
        kfree(system);
        return NULL;
 }
 
 static struct dentry *
 event_subsystem_dir(struct trace_array *tr, const char *name,
-                   struct ftrace_event_file *file, struct dentry *parent)
+                   struct trace_event_file *file, struct dentry *parent)
 {
-       struct ftrace_subsystem_dir *dir;
+       struct trace_subsystem_dir *dir;
        struct event_subsystem *system;
        struct dentry *entry;
 
@@ -1573,9 +2055,9 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
 }
 
 static int
-event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
+event_create_dir(struct dentry *parent, struct trace_event_file *file)
 {
-       struct ftrace_event_call *call = file->event_call;
+       struct trace_event_call *call = file->event_call;
        struct trace_array *tr = file->tr;
        struct list_head *head;
        struct dentry *d_events;
@@ -1593,7 +2075,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
        } else
                d_events = parent;
 
-       name = ftrace_event_name(call);
+       name = trace_event_name(call);
        file->dir = tracefs_create_dir(name, d_events);
        if (!file->dir) {
                pr_warn("Could not create tracefs '%s' directory\n", name);
@@ -1636,9 +2118,9 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
        return 0;
 }
 
-static void remove_event_from_tracers(struct ftrace_event_call *call)
+static void remove_event_from_tracers(struct trace_event_call *call)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        struct trace_array *tr;
 
        do_for_each_event_file_safe(tr, file) {
@@ -1656,10 +2138,10 @@ static void remove_event_from_tracers(struct ftrace_event_call *call)
        } while_for_each_event_file();
 }
 
-static void event_remove(struct ftrace_event_call *call)
+static void event_remove(struct trace_event_call *call)
 {
        struct trace_array *tr;
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
 
        do_for_each_event_file(tr, file) {
                if (file->event_call != call)
@@ -1675,17 +2157,17 @@ static void event_remove(struct ftrace_event_call *call)
        } while_for_each_event_file();
 
        if (call->event.funcs)
-               __unregister_ftrace_event(&call->event);
+               __unregister_trace_event(&call->event);
        remove_event_from_tracers(call);
        list_del(&call->list);
 }
 
-static int event_init(struct ftrace_event_call *call)
+static int event_init(struct trace_event_call *call)
 {
        int ret = 0;
        const char *name;
 
-       name = ftrace_event_name(call);
+       name = trace_event_name(call);
        if (WARN_ON(!name))
                return -EINVAL;
 
@@ -1699,7 +2181,7 @@ static int event_init(struct ftrace_event_call *call)
 }
 
 static int
-__register_event(struct ftrace_event_call *call, struct module *mod)
+__register_event(struct trace_event_call *call, struct module *mod)
 {
        int ret;
 
@@ -1735,7 +2217,7 @@ static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
        return ptr + elen;
 }
 
-static void update_event_printk(struct ftrace_event_call *call,
+static void update_event_printk(struct trace_event_call *call,
                                struct trace_enum_map *map)
 {
        char *ptr;
@@ -1813,7 +2295,7 @@ static void update_event_printk(struct ftrace_event_call *call,
 
 void trace_event_enum_update(struct trace_enum_map **map, int len)
 {
-       struct ftrace_event_call *call, *p;
+       struct trace_event_call *call, *p;
        const char *last_system = NULL;
        int last_i;
        int i;
@@ -1838,11 +2320,11 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
        up_write(&trace_event_sem);
 }
 
-static struct ftrace_event_file *
-trace_create_new_event(struct ftrace_event_call *call,
+static struct trace_event_file *
+trace_create_new_event(struct trace_event_call *call,
                       struct trace_array *tr)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
 
        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
        if (!file)
@@ -1860,9 +2342,9 @@ trace_create_new_event(struct ftrace_event_call *call,
 
 /* Add an event to a trace directory */
 static int
-__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
+__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
 
        file = trace_create_new_event(call, tr);
        if (!file)
@@ -1877,10 +2359,10 @@ __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
  * the filesystem is initialized.
  */
 static __init int
-__trace_early_add_new_event(struct ftrace_event_call *call,
+__trace_early_add_new_event(struct trace_event_call *call,
                            struct trace_array *tr)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
 
        file = trace_create_new_event(call, tr);
        if (!file)
@@ -1890,10 +2372,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 }
 
 struct ftrace_module_file_ops;
-static void __add_event_to_tracers(struct ftrace_event_call *call);
+static void __add_event_to_tracers(struct trace_event_call *call);
 
 /* Add an additional event_call dynamically */
-int trace_add_event_call(struct ftrace_event_call *call)
+int trace_add_event_call(struct trace_event_call *call)
 {
        int ret;
        mutex_lock(&trace_types_lock);
@@ -1912,7 +2394,7 @@ int trace_add_event_call(struct ftrace_event_call *call)
  * Must be called under locking of trace_types_lock, event_mutex and
  * trace_event_sem.
  */
-static void __trace_remove_event_call(struct ftrace_event_call *call)
+static void __trace_remove_event_call(struct trace_event_call *call)
 {
        event_remove(call);
        trace_destroy_fields(call);
@@ -1920,10 +2402,10 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
        call->filter = NULL;
 }
 
-static int probe_remove_event_call(struct ftrace_event_call *call)
+static int probe_remove_event_call(struct trace_event_call *call)
 {
        struct trace_array *tr;
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
 
 #ifdef CONFIG_PERF_EVENTS
        if (call->perf_refcount)
@@ -1934,10 +2416,10 @@ static int probe_remove_event_call(struct ftrace_event_call *call)
                        continue;
                /*
                 * We can't rely on ftrace_event_enable_disable(enable => 0)
-                * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+                * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress
                 * TRACE_REG_UNREGISTER.
                 */
-               if (file->flags & FTRACE_EVENT_FL_ENABLED)
+               if (file->flags & EVENT_FILE_FL_ENABLED)
                        return -EBUSY;
                /*
                 * The do_for_each_event_file_safe() is
@@ -1954,7 +2436,7 @@ static int probe_remove_event_call(struct ftrace_event_call *call)
 }
 
 /* Remove an event_call */
-int trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct trace_event_call *call)
 {
        int ret;
 
@@ -1978,7 +2460,7 @@ int trace_remove_event_call(struct ftrace_event_call *call)
 
 static void trace_module_add_events(struct module *mod)
 {
-       struct ftrace_event_call **call, **start, **end;
+       struct trace_event_call **call, **start, **end;
 
        if (!mod->num_trace_events)
                return;
@@ -2001,7 +2483,7 @@ static void trace_module_add_events(struct module *mod)
 
 static void trace_module_remove_events(struct module *mod)
 {
-       struct ftrace_event_call *call, *p;
+       struct trace_event_call *call, *p;
        bool clear_trace = false;
 
        down_write(&trace_event_sem);
@@ -2057,28 +2539,28 @@ static struct notifier_block trace_module_nb = {
 static void
 __trace_add_event_dirs(struct trace_array *tr)
 {
-       struct ftrace_event_call *call;
+       struct trace_event_call *call;
        int ret;
 
        list_for_each_entry(call, &ftrace_events, list) {
                ret = __trace_add_new_event(call, tr);
                if (ret < 0)
                        pr_warn("Could not create directory for event %s\n",
-                               ftrace_event_name(call));
+                               trace_event_name(call));
        }
 }
 
-struct ftrace_event_file *
+struct trace_event_file *
 find_event_file(struct trace_array *tr, const char *system,  const char *event)
 {
-       struct ftrace_event_file *file;
-       struct ftrace_event_call *call;
+       struct trace_event_file *file;
+       struct trace_event_call *call;
        const char *name;
 
        list_for_each_entry(file, &tr->events, list) {
 
                call = file->event_call;
-               name = ftrace_event_name(call);
+               name = trace_event_name(call);
 
                if (!name || !call->class || !call->class->reg)
                        continue;
@@ -2100,7 +2582,7 @@ find_event_file(struct trace_array *tr, const char *system,  const char *event)
 #define DISABLE_EVENT_STR      "disable_event"
 
 struct event_probe_data {
-       struct ftrace_event_file        *file;
+       struct trace_event_file *file;
        unsigned long                   count;
        int                             ref;
        bool                            enable;
@@ -2116,9 +2598,9 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
                return;
 
        if (data->enable)
-               clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
+               clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
        else
-               set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
+               set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
 }
 
 static void
@@ -2134,7 +2616,7 @@ event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data
                return;
 
        /* Skip if the event is in a state we want to switch to */
-       if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+       if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
                return;
 
        if (data->count != -1)
@@ -2154,7 +2636,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
        seq_printf(m, "%s:%s:%s",
                   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
                   data->file->event_call->class->system,
-                  ftrace_event_name(data->file->event_call));
+                  trace_event_name(data->file->event_call));
 
        if (data->count == -1)
                seq_puts(m, ":unlimited\n");
@@ -2228,7 +2710,7 @@ event_enable_func(struct ftrace_hash *hash,
                  char *glob, char *cmd, char *param, int enabled)
 {
        struct trace_array *tr = top_trace_array();
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        struct ftrace_probe_ops *ops;
        struct event_probe_data *data;
        const char *system;
@@ -2360,7 +2842,7 @@ static inline int register_event_cmds(void) { return 0; }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /*
- * The top level array has already had its ftrace_event_file
+ * The top level array has already had its trace_event_file
  * descriptors created in order to allow for early events to
  * be recorded. This function is called after the tracefs has been
  * initialized, and we now have to create the files associated
@@ -2369,7 +2851,7 @@ static inline int register_event_cmds(void) { return 0; }
 static __init void
 __trace_early_add_event_dirs(struct trace_array *tr)
 {
-       struct ftrace_event_file *file;
+       struct trace_event_file *file;
        int ret;
 
 
@@ -2377,7 +2859,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
                ret = event_create_dir(tr->event_dir, file);
                if (ret < 0)
                        pr_warn("Could not create directory for event %s\n",
-                               ftrace_event_name(file->event_call));
+                               trace_event_name(file->event_call));
        }
 }
 
@@ -2390,7 +2872,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
 static __init void
 __trace_early_add_events(struct trace_array *tr)
 {
-       struct ftrace_event_call *call;
+       struct trace_event_call *call;
        int ret;
 
        list_for_each_entry(call, &ftrace_events, list) {
@@ -2401,7 +2883,7 @@ __trace_early_add_events(struct trace_array *tr)
                ret = __trace_early_add_new_event(call, tr);
                if (ret < 0)
                        pr_warn("Could not create early event %s\n",
-                               ftrace_event_name(call));
+                               trace_event_name(call));
        }
 }
 
@@ -2409,13 +2891,13 @@ __trace_early_add_events(struct trace_array *tr)
 static void
 __trace_remove_event_dirs(struct trace_array *tr)
 {
-       struct ftrace_event_file *file, *next;
+       struct trace_event_file *file, *next;
 
        list_for_each_entry_safe(file, next, &tr->events, list)
                remove_event_file_dir(file);
 }
 
-static void __add_event_to_tracers(struct ftrace_event_call *call)
+static void __add_event_to_tracers(struct trace_event_call *call)
 {
        struct trace_array *tr;
 
@@ -2423,8 +2905,8 @@ static void __add_event_to_tracers(struct ftrace_event_call *call)
                __trace_add_new_event(call, tr);
 }
 
-extern struct ftrace_event_call *__start_ftrace_events[];
-extern struct ftrace_event_call *__stop_ftrace_events[];
+extern struct trace_event_call *__start_ftrace_events[];
+extern struct trace_event_call *__stop_ftrace_events[];
 
 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
 
@@ -2458,6 +2940,9 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
                return -ENOMEM;
        }
 
+       entry = tracefs_create_file("set_event_pid", 0644, parent,
+                                   tr, &ftrace_set_event_pid_fops);
+
        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
@@ -2538,6 +3023,9 @@ int event_trace_del_tracer(struct trace_array *tr)
        /* Disable any event triggers and associated soft-disabled events */
        clear_event_triggers(tr);
 
+       /* Clear the pid list */
+       __ftrace_clear_event_pids(tr);
+
        /* Disable any running events */
        __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
 
@@ -2559,7 +3047,7 @@ int event_trace_del_tracer(struct trace_array *tr)
 static __init int event_trace_memsetup(void)
 {
        field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
-       file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
+       file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
        return 0;
 }
 
@@ -2575,16 +3063,16 @@ early_enable_events(struct trace_array *tr, bool disable_first)
 
                if (!token)
                        break;
-               if (!*token)
-                       continue;
 
-               /* Restarting syscalls requires that we stop them first */
-               if (disable_first)
-                       ftrace_set_clr_event(tr, token, 0);
+               if (*token) {
+                       /* Restarting syscalls requires that we stop them first */
+                       if (disable_first)
+                               ftrace_set_clr_event(tr, token, 0);
 
-               ret = ftrace_set_clr_event(tr, token, 1);
-               if (ret)
-                       pr_warn("Failed to enable trace event: %s\n", token);
+                       ret = ftrace_set_clr_event(tr, token, 1);
+                       if (ret)
+                               pr_warn("Failed to enable trace event: %s\n", token);
+               }
 
                /* Put back the comma to allow this to be called again */
                if (buf)
@@ -2595,7 +3083,7 @@ early_enable_events(struct trace_array *tr, bool disable_first)
 static __init int event_trace_enable(void)
 {
        struct trace_array *tr = top_trace_array();
-       struct ftrace_event_call **iter, *call;
+       struct trace_event_call **iter, *call;
        int ret;
 
        if (!tr)
@@ -2673,6 +3161,9 @@ static __init int event_trace_init(void)
        if (!entry)
                pr_warn("Could not create tracefs 'available_events' entry\n");
 
+       if (trace_define_generic_fields())
+               pr_warn("tracing: Failed to allocated generic fields");
+
        if (trace_define_common_fields())
                pr_warn("tracing: Failed to allocate common fields");
 
@@ -2756,9 +3247,9 @@ static __init void event_test_stuff(void)
  */
 static __init void event_trace_self_tests(void)
 {
-       struct ftrace_subsystem_dir *dir;
-       struct ftrace_event_file *file;
-       struct ftrace_event_call *call;
+       struct trace_subsystem_dir *dir;
+       struct trace_event_file *file;
+       struct trace_event_call *call;
        struct event_subsystem *system;
        struct trace_array *tr;
        int ret;
@@ -2789,13 +3280,13 @@ static __init void event_trace_self_tests(void)
                        continue;
 #endif
 
-               pr_info("Testing event %s: ", ftrace_event_name(call));
+               pr_info("Testing event %s: ", trace_event_name(call));
 
                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
-               if (file->flags & FTRACE_EVENT_FL_ENABLED) {
+               if (file->flags & EVENT_FILE_FL_ENABLED) {
                        pr_warn("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
@@ -2868,7 +3359,9 @@ static __init void event_trace_self_tests(void)
 
 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
-static void
+static struct trace_array *event_tr;
+
+static void __init
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
@@ -2899,7 +3392,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
 
-       trace_buffer_unlock_commit(buffer, event, flags, pc);
+       trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
 
  out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
@@ -2915,6 +3408,9 @@ static struct ftrace_ops trace_ops __initdata  =
 static __init void event_trace_self_test_with_function(void)
 {
        int ret;
+       event_tr = top_trace_array();
+       if (WARN_ON(!event_tr))
+               return;
        ret = register_ftrace_function(&trace_ops);
        if (WARN_ON(ret < 0)) {
                pr_info("Failed to enable function tracer for event tests\n");