These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/kernel/trace/trace_event_perf.c
index 6fa484d..cc9f7a9 100644
@@ -1,7 +1,7 @@
 /*
  * trace event based perf event profiling/tracing
  *
- * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
  * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
@@ -21,7 +21,7 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 /* Count the events in use (per event id, not per instance) */
 static int     total_ref_count;
 
-static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
+static int perf_trace_event_perm(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
 {
        if (tp_event->perf_perm) {
@@ -83,7 +83,7 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
        return 0;
 }
 
-static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
+static int perf_trace_event_reg(struct trace_event_call *tp_event,
                                struct perf_event *p_event)
 {
        struct hlist_head __percpu *list;
@@ -143,7 +143,7 @@ fail:
 
 static void perf_trace_event_unreg(struct perf_event *p_event)
 {
-       struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct trace_event_call *tp_event = p_event->tp_event;
        int i;
 
        if (--tp_event->perf_refcount > 0)
@@ -172,17 +172,17 @@ out:
 
 static int perf_trace_event_open(struct perf_event *p_event)
 {
-       struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct trace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
 }
 
 static void perf_trace_event_close(struct perf_event *p_event)
 {
-       struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct trace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
 }
 
-static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+static int perf_trace_event_init(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
 {
        int ret;
@@ -206,7 +206,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 
 int perf_trace_init(struct perf_event *p_event)
 {
-       struct ftrace_event_call *tp_event;
+       struct trace_event_call *tp_event;
        u64 event_id = p_event->attr.config;
        int ret = -EINVAL;
 
@@ -236,7 +236,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
-       struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct trace_event_call *tp_event = p_event->tp_event;
        struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;
 
@@ -255,7 +255,7 @@ int perf_trace_add(struct perf_event *p_event, int flags)
 
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
-       struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct trace_event_call *tp_event = p_event->tp_event;
        hlist_del_rcu(&p_event->hlist_entry);
        tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
@@ -357,7 +357,7 @@ static void perf_ftrace_function_disable(struct perf_event *event)
        ftrace_function_local_disable(&event->ftrace_ops);
 }
 
-int perf_ftrace_event_register(struct ftrace_event_call *call,
+int perf_ftrace_event_register(struct trace_event_call *call,
                               enum trace_reg type, void *data)
 {
        switch (type) {