Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / kernel / trace / latency_hist.c
1 /*
2  * kernel/trace/latency_hist.c
3  *
4  * Add support for histograms of preemption-off latency and
5  * interrupt-off latency and wakeup latency, it depends on
6  * Real-Time Preemption Support.
7  *
8  *  Copyright (C) 2005 MontaVista Software, Inc.
9  *  Yi Yang <yyang@ch.mvista.com>
10  *
11  *  Converted to work with the new latency tracer.
12  *  Copyright (C) 2008 Red Hat, Inc.
13  *    Steven Rostedt <srostedt@redhat.com>
14  *
15  */
16 #include <linux/module.h>
17 #include <linux/debugfs.h>
18 #include <linux/seq_file.h>
19 #include <linux/percpu.h>
20 #include <linux/kallsyms.h>
21 #include <linux/uaccess.h>
22 #include <linux/sched.h>
23 #include <linux/sched/rt.h>
24 #include <linux/slab.h>
25 #include <linux/atomic.h>
26 #include <asm/div64.h>
27
28 #include "trace.h"
29 #include <trace/events/sched.h>
30
31 #define NSECS_PER_USECS 1000L
32
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/hist.h>
35
36 enum {
37         IRQSOFF_LATENCY = 0,
38         PREEMPTOFF_LATENCY,
39         PREEMPTIRQSOFF_LATENCY,
40         WAKEUP_LATENCY,
41         WAKEUP_LATENCY_SHAREDPRIO,
42         MISSED_TIMER_OFFSETS,
43         TIMERANDWAKEUP_LATENCY,
44         MAX_LATENCY_TYPE,
45 };
46
47 #define MAX_ENTRY_NUM 10240
48
49 struct hist_data {
50         atomic_t hist_mode; /* 0 log, 1 don't log */
51         long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
52         long min_lat;
53         long max_lat;
54         unsigned long long below_hist_bound_samples;
55         unsigned long long above_hist_bound_samples;
56         long long accumulate_lat;
57         unsigned long long total_samples;
58         unsigned long long hist_array[MAX_ENTRY_NUM];
59 };
60
61 struct enable_data {
62         int latency_type;
63         int enabled;
64 };
65
66 static char *latency_hist_dir_root = "latency_hist";
67
68 #ifdef CONFIG_INTERRUPT_OFF_HIST
69 static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
70 static char *irqsoff_hist_dir = "irqsoff";
71 static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
72 static DEFINE_PER_CPU(int, hist_irqsoff_counting);
73 #endif
74
75 #ifdef CONFIG_PREEMPT_OFF_HIST
76 static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
77 static char *preemptoff_hist_dir = "preemptoff";
78 static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
79 static DEFINE_PER_CPU(int, hist_preemptoff_counting);
80 #endif
81
82 #if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
83 static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
84 static char *preemptirqsoff_hist_dir = "preemptirqsoff";
85 static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
86 static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
87 #endif
88
89 #if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
90 static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
91 static struct enable_data preemptirqsoff_enabled_data = {
92         .latency_type = PREEMPTIRQSOFF_LATENCY,
93         .enabled = 0,
94 };
95 #endif
96
97 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
98         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
99 struct maxlatproc_data {
100         char comm[FIELD_SIZEOF(struct task_struct, comm)];
101         char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
102         int pid;
103         int current_pid;
104         int prio;
105         int current_prio;
106         long latency;
107         long timeroffset;
108         cycle_t timestamp;
109 };
110 #endif
111
112 #ifdef CONFIG_WAKEUP_LATENCY_HIST
113 static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
114 static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
115 static char *wakeup_latency_hist_dir = "wakeup";
116 static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
117 static notrace void probe_wakeup_latency_hist_start(void *v,
118         struct task_struct *p, int success);
119 static notrace void probe_wakeup_latency_hist_stop(void *v,
120         struct task_struct *prev, struct task_struct *next);
121 static notrace void probe_sched_migrate_task(void *,
122         struct task_struct *task, int cpu);
123 static struct enable_data wakeup_latency_enabled_data = {
124         .latency_type = WAKEUP_LATENCY,
125         .enabled = 0,
126 };
127 static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
128 static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
129 static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
130 static DEFINE_PER_CPU(int, wakeup_sharedprio);
131 static unsigned long wakeup_pid;
132 #endif
133
134 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
135 static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
136 static char *missed_timer_offsets_dir = "missed_timer_offsets";
137 static notrace void probe_hrtimer_interrupt(void *v, int cpu,
138         long long offset, struct task_struct *curr, struct task_struct *task);
139 static struct enable_data missed_timer_offsets_enabled_data = {
140         .latency_type = MISSED_TIMER_OFFSETS,
141         .enabled = 0,
142 };
143 static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
144 static unsigned long missed_timer_offsets_pid;
145 #endif
146
147 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
148         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
149 static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
150 static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
151 static struct enable_data timerandwakeup_enabled_data = {
152         .latency_type = TIMERANDWAKEUP_LATENCY,
153         .enabled = 0,
154 };
155 static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
156 #endif
157
158 void notrace latency_hist(int latency_type, int cpu, long latency,
159                           long timeroffset, cycle_t stop,
160                           struct task_struct *p)
161 {
162         struct hist_data *my_hist;
163 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
164         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
165         struct maxlatproc_data *mp = NULL;
166 #endif
167
168         if (!cpu_possible(cpu) || latency_type < 0 ||
169             latency_type >= MAX_LATENCY_TYPE)
170                 return;
171
172         switch (latency_type) {
173 #ifdef CONFIG_INTERRUPT_OFF_HIST
174         case IRQSOFF_LATENCY:
175                 my_hist = &per_cpu(irqsoff_hist, cpu);
176                 break;
177 #endif
178 #ifdef CONFIG_PREEMPT_OFF_HIST
179         case PREEMPTOFF_LATENCY:
180                 my_hist = &per_cpu(preemptoff_hist, cpu);
181                 break;
182 #endif
183 #if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
184         case PREEMPTIRQSOFF_LATENCY:
185                 my_hist = &per_cpu(preemptirqsoff_hist, cpu);
186                 break;
187 #endif
188 #ifdef CONFIG_WAKEUP_LATENCY_HIST
189         case WAKEUP_LATENCY:
190                 my_hist = &per_cpu(wakeup_latency_hist, cpu);
191                 mp = &per_cpu(wakeup_maxlatproc, cpu);
192                 break;
193         case WAKEUP_LATENCY_SHAREDPRIO:
194                 my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
195                 mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
196                 break;
197 #endif
198 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
199         case MISSED_TIMER_OFFSETS:
200                 my_hist = &per_cpu(missed_timer_offsets, cpu);
201                 mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
202                 break;
203 #endif
204 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
205         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
206         case TIMERANDWAKEUP_LATENCY:
207                 my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
208                 mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
209                 break;
210 #endif
211
212         default:
213                 return;
214         }
215
216         latency += my_hist->offset;
217
218         if (atomic_read(&my_hist->hist_mode) == 0)
219                 return;
220
221         if (latency < 0 || latency >= MAX_ENTRY_NUM) {
222                 if (latency < 0)
223                         my_hist->below_hist_bound_samples++;
224                 else
225                         my_hist->above_hist_bound_samples++;
226         } else
227                 my_hist->hist_array[latency]++;
228
229         if (unlikely(latency > my_hist->max_lat ||
230             my_hist->min_lat == LONG_MAX)) {
231 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
232         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
233                 if (latency_type == WAKEUP_LATENCY ||
234                     latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
235                     latency_type == MISSED_TIMER_OFFSETS ||
236                     latency_type == TIMERANDWAKEUP_LATENCY) {
237                         strncpy(mp->comm, p->comm, sizeof(mp->comm));
238                         strncpy(mp->current_comm, current->comm,
239                             sizeof(mp->current_comm));
240                         mp->pid = task_pid_nr(p);
241                         mp->current_pid = task_pid_nr(current);
242                         mp->prio = p->prio;
243                         mp->current_prio = current->prio;
244                         mp->latency = latency;
245                         mp->timeroffset = timeroffset;
246                         mp->timestamp = stop;
247                 }
248 #endif
249                 my_hist->max_lat = latency;
250         }
251         if (unlikely(latency < my_hist->min_lat))
252                 my_hist->min_lat = latency;
253         my_hist->total_samples++;
254         my_hist->accumulate_lat += latency;
255 }
256
257 static void *l_start(struct seq_file *m, loff_t *pos)
258 {
259         loff_t *index_ptr = NULL;
260         loff_t index = *pos;
261         struct hist_data *my_hist = m->private;
262
263         if (index == 0) {
264                 char minstr[32], avgstr[32], maxstr[32];
265
266                 atomic_dec(&my_hist->hist_mode);
267
268                 if (likely(my_hist->total_samples)) {
269                         long avg = (long) div64_s64(my_hist->accumulate_lat,
270                             my_hist->total_samples);
271                         snprintf(minstr, sizeof(minstr), "%ld",
272                             my_hist->min_lat - my_hist->offset);
273                         snprintf(avgstr, sizeof(avgstr), "%ld",
274                             avg - my_hist->offset);
275                         snprintf(maxstr, sizeof(maxstr), "%ld",
276                             my_hist->max_lat - my_hist->offset);
277                 } else {
278                         strcpy(minstr, "<undef>");
279                         strcpy(avgstr, minstr);
280                         strcpy(maxstr, minstr);
281                 }
282
283                 seq_printf(m, "#Minimum latency: %s microseconds\n"
284                            "#Average latency: %s microseconds\n"
285                            "#Maximum latency: %s microseconds\n"
286                            "#Total samples: %llu\n"
287                            "#There are %llu samples lower than %ld"
288                            " microseconds.\n"
289                            "#There are %llu samples greater or equal"
290                            " than %ld microseconds.\n"
291                            "#usecs\t%16s\n",
292                            minstr, avgstr, maxstr,
293                            my_hist->total_samples,
294                            my_hist->below_hist_bound_samples,
295                            -my_hist->offset,
296                            my_hist->above_hist_bound_samples,
297                            MAX_ENTRY_NUM - my_hist->offset,
298                            "samples");
299         }
300         if (index < MAX_ENTRY_NUM) {
301                 index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
302                 if (index_ptr)
303                         *index_ptr = index;
304         }
305
306         return index_ptr;
307 }
308
309 static void *l_next(struct seq_file *m, void *p, loff_t *pos)
310 {
311         loff_t *index_ptr = p;
312         struct hist_data *my_hist = m->private;
313
314         if (++*pos >= MAX_ENTRY_NUM) {
315                 atomic_inc(&my_hist->hist_mode);
316                 return NULL;
317         }
318         *index_ptr = *pos;
319         return index_ptr;
320 }
321
322 static void l_stop(struct seq_file *m, void *p)
323 {
324         kfree(p);
325 }
326
327 static int l_show(struct seq_file *m, void *p)
328 {
329         int index = *(loff_t *) p;
330         struct hist_data *my_hist = m->private;
331
332         seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
333             my_hist->hist_array[index]);
334         return 0;
335 }
336
337 static const struct seq_operations latency_hist_seq_op = {
338         .start = l_start,
339         .next  = l_next,
340         .stop  = l_stop,
341         .show  = l_show
342 };
343
344 static int latency_hist_open(struct inode *inode, struct file *file)
345 {
346         int ret;
347
348         ret = seq_open(file, &latency_hist_seq_op);
349         if (!ret) {
350                 struct seq_file *seq = file->private_data;
351                 seq->private = inode->i_private;
352         }
353         return ret;
354 }
355
356 static const struct file_operations latency_hist_fops = {
357         .open = latency_hist_open,
358         .read = seq_read,
359         .llseek = seq_lseek,
360         .release = seq_release,
361 };
362
363 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
364         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
365 static void clear_maxlatprocdata(struct maxlatproc_data *mp)
366 {
367         mp->comm[0] = mp->current_comm[0] = '\0';
368         mp->prio = mp->current_prio = mp->pid = mp->current_pid =
369             mp->latency = mp->timeroffset = -1;
370         mp->timestamp = 0;
371 }
372 #endif
373
374 static void hist_reset(struct hist_data *hist)
375 {
376         atomic_dec(&hist->hist_mode);
377
378         memset(hist->hist_array, 0, sizeof(hist->hist_array));
379         hist->below_hist_bound_samples = 0ULL;
380         hist->above_hist_bound_samples = 0ULL;
381         hist->min_lat = LONG_MAX;
382         hist->max_lat = LONG_MIN;
383         hist->total_samples = 0ULL;
384         hist->accumulate_lat = 0LL;
385
386         atomic_inc(&hist->hist_mode);
387 }
388
389 static ssize_t
390 latency_hist_reset(struct file *file, const char __user *a,
391                    size_t size, loff_t *off)
392 {
393         int cpu;
394         struct hist_data *hist = NULL;
395 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
396         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
397         struct maxlatproc_data *mp = NULL;
398 #endif
399         off_t latency_type = (off_t) file->private_data;
400
401         for_each_online_cpu(cpu) {
402
403                 switch (latency_type) {
404 #ifdef CONFIG_PREEMPT_OFF_HIST
405                 case PREEMPTOFF_LATENCY:
406                         hist = &per_cpu(preemptoff_hist, cpu);
407                         break;
408 #endif
409 #ifdef CONFIG_INTERRUPT_OFF_HIST
410                 case IRQSOFF_LATENCY:
411                         hist = &per_cpu(irqsoff_hist, cpu);
412                         break;
413 #endif
414 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
415                 case PREEMPTIRQSOFF_LATENCY:
416                         hist = &per_cpu(preemptirqsoff_hist, cpu);
417                         break;
418 #endif
419 #ifdef CONFIG_WAKEUP_LATENCY_HIST
420                 case WAKEUP_LATENCY:
421                         hist = &per_cpu(wakeup_latency_hist, cpu);
422                         mp = &per_cpu(wakeup_maxlatproc, cpu);
423                         break;
424                 case WAKEUP_LATENCY_SHAREDPRIO:
425                         hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
426                         mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
427                         break;
428 #endif
429 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
430                 case MISSED_TIMER_OFFSETS:
431                         hist = &per_cpu(missed_timer_offsets, cpu);
432                         mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
433                         break;
434 #endif
435 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
436         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
437                 case TIMERANDWAKEUP_LATENCY:
438                         hist = &per_cpu(timerandwakeup_latency_hist, cpu);
439                         mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
440                         break;
441 #endif
442                 }
443
444                 hist_reset(hist);
445 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
446         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
447                 if (latency_type == WAKEUP_LATENCY ||
448                     latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
449                     latency_type == MISSED_TIMER_OFFSETS ||
450                     latency_type == TIMERANDWAKEUP_LATENCY)
451                         clear_maxlatprocdata(mp);
452 #endif
453         }
454
455         return size;
456 }
457
458 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
459         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
460 static ssize_t
461 show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
462 {
463         char buf[64];
464         int r;
465         unsigned long *this_pid = file->private_data;
466
467         r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
468         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
469 }
470
471 static ssize_t do_pid(struct file *file, const char __user *ubuf,
472                       size_t cnt, loff_t *ppos)
473 {
474         char buf[64];
475         unsigned long pid;
476         unsigned long *this_pid = file->private_data;
477
478         if (cnt >= sizeof(buf))
479                 return -EINVAL;
480
481         if (copy_from_user(&buf, ubuf, cnt))
482                 return -EFAULT;
483
484         buf[cnt] = '\0';
485
486         if (kstrtoul(buf, 10, &pid))
487                 return -EINVAL;
488
489         *this_pid = pid;
490
491         return cnt;
492 }
493 #endif
494
495 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
496         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
497 static ssize_t
498 show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
499 {
500         int r;
501         struct maxlatproc_data *mp = file->private_data;
502         int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
503         unsigned long long t;
504         unsigned long usecs, secs;
505         char *buf;
506
507         if (mp->pid == -1 || mp->current_pid == -1) {
508                 buf = "(none)\n";
509                 return simple_read_from_buffer(ubuf, cnt, ppos, buf,
510                     strlen(buf));
511         }
512
513         buf = kmalloc(strmaxlen, GFP_KERNEL);
514         if (buf == NULL)
515                 return -ENOMEM;
516
517         t = ns2usecs(mp->timestamp);
518         usecs = do_div(t, USEC_PER_SEC);
519         secs = (unsigned long) t;
520         r = snprintf(buf, strmaxlen,
521             "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
522             MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
523             mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
524             secs, usecs);
525         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
526         kfree(buf);
527         return r;
528 }
529 #endif
530
531 static ssize_t
532 show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
533 {
534         char buf[64];
535         struct enable_data *ed = file->private_data;
536         int r;
537
538         r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
539         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
540 }
541
542 static ssize_t
543 do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
544 {
545         char buf[64];
546         long enable;
547         struct enable_data *ed = file->private_data;
548
549         if (cnt >= sizeof(buf))
550                 return -EINVAL;
551
552         if (copy_from_user(&buf, ubuf, cnt))
553                 return -EFAULT;
554
555         buf[cnt] = 0;
556
557         if (kstrtoul(buf, 10, &enable))
558                 return -EINVAL;
559
560         if ((enable && ed->enabled) || (!enable && !ed->enabled))
561                 return cnt;
562
563         if (enable) {
564                 int ret;
565
566                 switch (ed->latency_type) {
567 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
568                 case PREEMPTIRQSOFF_LATENCY:
569                         ret = register_trace_preemptirqsoff_hist(
570                             probe_preemptirqsoff_hist, NULL);
571                         if (ret) {
572                                 pr_info("wakeup trace: Couldn't assign "
573                                     "probe_preemptirqsoff_hist "
574                                     "to trace_preemptirqsoff_hist\n");
575                                 return ret;
576                         }
577                         break;
578 #endif
579 #ifdef CONFIG_WAKEUP_LATENCY_HIST
580                 case WAKEUP_LATENCY:
581                         ret = register_trace_sched_wakeup(
582                             probe_wakeup_latency_hist_start, NULL);
583                         if (ret) {
584                                 pr_info("wakeup trace: Couldn't assign "
585                                     "probe_wakeup_latency_hist_start "
586                                     "to trace_sched_wakeup\n");
587                                 return ret;
588                         }
589                         ret = register_trace_sched_wakeup_new(
590                             probe_wakeup_latency_hist_start, NULL);
591                         if (ret) {
592                                 pr_info("wakeup trace: Couldn't assign "
593                                     "probe_wakeup_latency_hist_start "
594                                     "to trace_sched_wakeup_new\n");
595                                 unregister_trace_sched_wakeup(
596                                     probe_wakeup_latency_hist_start, NULL);
597                                 return ret;
598                         }
599                         ret = register_trace_sched_switch(
600                             probe_wakeup_latency_hist_stop, NULL);
601                         if (ret) {
602                                 pr_info("wakeup trace: Couldn't assign "
603                                     "probe_wakeup_latency_hist_stop "
604                                     "to trace_sched_switch\n");
605                                 unregister_trace_sched_wakeup(
606                                     probe_wakeup_latency_hist_start, NULL);
607                                 unregister_trace_sched_wakeup_new(
608                                     probe_wakeup_latency_hist_start, NULL);
609                                 return ret;
610                         }
611                         ret = register_trace_sched_migrate_task(
612                             probe_sched_migrate_task, NULL);
613                         if (ret) {
614                                 pr_info("wakeup trace: Couldn't assign "
615                                     "probe_sched_migrate_task "
616                                     "to trace_sched_migrate_task\n");
617                                 unregister_trace_sched_wakeup(
618                                     probe_wakeup_latency_hist_start, NULL);
619                                 unregister_trace_sched_wakeup_new(
620                                     probe_wakeup_latency_hist_start, NULL);
621                                 unregister_trace_sched_switch(
622                                     probe_wakeup_latency_hist_stop, NULL);
623                                 return ret;
624                         }
625                         break;
626 #endif
627 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
628                 case MISSED_TIMER_OFFSETS:
629                         ret = register_trace_hrtimer_interrupt(
630                             probe_hrtimer_interrupt, NULL);
631                         if (ret) {
632                                 pr_info("wakeup trace: Couldn't assign "
633                                     "probe_hrtimer_interrupt "
634                                     "to trace_hrtimer_interrupt\n");
635                                 return ret;
636                         }
637                         break;
638 #endif
639 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
640         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
641                 case TIMERANDWAKEUP_LATENCY:
642                         if (!wakeup_latency_enabled_data.enabled ||
643                             !missed_timer_offsets_enabled_data.enabled)
644                                 return -EINVAL;
645                         break;
646 #endif
647                 default:
648                         break;
649                 }
650         } else {
651                 switch (ed->latency_type) {
652 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
653                 case PREEMPTIRQSOFF_LATENCY:
654                         {
655                                 int cpu;
656
657                                 unregister_trace_preemptirqsoff_hist(
658                                     probe_preemptirqsoff_hist, NULL);
659                                 for_each_online_cpu(cpu) {
660 #ifdef CONFIG_INTERRUPT_OFF_HIST
661                                         per_cpu(hist_irqsoff_counting,
662                                             cpu) = 0;
663 #endif
664 #ifdef CONFIG_PREEMPT_OFF_HIST
665                                         per_cpu(hist_preemptoff_counting,
666                                             cpu) = 0;
667 #endif
668 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
669                                         per_cpu(hist_preemptirqsoff_counting,
670                                             cpu) = 0;
671 #endif
672                                 }
673                         }
674                         break;
675 #endif
676 #ifdef CONFIG_WAKEUP_LATENCY_HIST
677                 case WAKEUP_LATENCY:
678                         {
679                                 int cpu;
680
681                                 unregister_trace_sched_wakeup(
682                                     probe_wakeup_latency_hist_start, NULL);
683                                 unregister_trace_sched_wakeup_new(
684                                     probe_wakeup_latency_hist_start, NULL);
685                                 unregister_trace_sched_switch(
686                                     probe_wakeup_latency_hist_stop, NULL);
687                                 unregister_trace_sched_migrate_task(
688                                     probe_sched_migrate_task, NULL);
689
690                                 for_each_online_cpu(cpu) {
691                                         per_cpu(wakeup_task, cpu) = NULL;
692                                         per_cpu(wakeup_sharedprio, cpu) = 0;
693                                 }
694                         }
695 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
696                         timerandwakeup_enabled_data.enabled = 0;
697 #endif
698                         break;
699 #endif
700 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
701                 case MISSED_TIMER_OFFSETS:
702                         unregister_trace_hrtimer_interrupt(
703                             probe_hrtimer_interrupt, NULL);
704 #ifdef CONFIG_WAKEUP_LATENCY_HIST
705                         timerandwakeup_enabled_data.enabled = 0;
706 #endif
707                         break;
708 #endif
709                 default:
710                         break;
711                 }
712         }
713         ed->enabled = enable;
714         return cnt;
715 }
716
717 static const struct file_operations latency_hist_reset_fops = {
718         .open = tracing_open_generic,
719         .write = latency_hist_reset,
720 };
721
722 static const struct file_operations enable_fops = {
723         .open = tracing_open_generic,
724         .read = show_enable,
725         .write = do_enable,
726 };
727
728 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
729         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
730 static const struct file_operations pid_fops = {
731         .open = tracing_open_generic,
732         .read = show_pid,
733         .write = do_pid,
734 };
735
736 static const struct file_operations maxlatproc_fops = {
737         .open = tracing_open_generic,
738         .read = show_maxlatproc,
739 };
740 #endif
741
742 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
743 static notrace void probe_preemptirqsoff_hist(void *v, int reason,
744         int starthist)
745 {
746         int cpu = raw_smp_processor_id();
747         int time_set = 0;
748
749         if (starthist) {
750                 cycle_t uninitialized_var(start);
751
752                 if (!preempt_count() && !irqs_disabled())
753                         return;
754
755 #ifdef CONFIG_INTERRUPT_OFF_HIST
756                 if ((reason == IRQS_OFF || reason == TRACE_START) &&
757                     !per_cpu(hist_irqsoff_counting, cpu)) {
758                         per_cpu(hist_irqsoff_counting, cpu) = 1;
759                         start = ftrace_now(cpu);
760                         time_set++;
761                         per_cpu(hist_irqsoff_start, cpu) = start;
762                 }
763 #endif
764
765 #ifdef CONFIG_PREEMPT_OFF_HIST
766                 if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
767                     !per_cpu(hist_preemptoff_counting, cpu)) {
768                         per_cpu(hist_preemptoff_counting, cpu) = 1;
769                         if (!(time_set++))
770                                 start = ftrace_now(cpu);
771                         per_cpu(hist_preemptoff_start, cpu) = start;
772                 }
773 #endif
774
775 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
776                 if (per_cpu(hist_irqsoff_counting, cpu) &&
777                     per_cpu(hist_preemptoff_counting, cpu) &&
778                     !per_cpu(hist_preemptirqsoff_counting, cpu)) {
779                         per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
780                         if (!time_set)
781                                 start = ftrace_now(cpu);
782                         per_cpu(hist_preemptirqsoff_start, cpu) = start;
783                 }
784 #endif
785         } else {
786                 cycle_t uninitialized_var(stop);
787
788 #ifdef CONFIG_INTERRUPT_OFF_HIST
789                 if ((reason == IRQS_ON || reason == TRACE_STOP) &&
790                     per_cpu(hist_irqsoff_counting, cpu)) {
791                         cycle_t start = per_cpu(hist_irqsoff_start, cpu);
792
793                         stop = ftrace_now(cpu);
794                         time_set++;
795                         if (start) {
796                                 long latency = ((long) (stop - start)) /
797                                     NSECS_PER_USECS;
798
799                                 latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
800                                     stop, NULL);
801                         }
802                         per_cpu(hist_irqsoff_counting, cpu) = 0;
803                 }
804 #endif
805
806 #ifdef CONFIG_PREEMPT_OFF_HIST
807                 if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
808                     per_cpu(hist_preemptoff_counting, cpu)) {
809                         cycle_t start = per_cpu(hist_preemptoff_start, cpu);
810
811                         if (!(time_set++))
812                                 stop = ftrace_now(cpu);
813                         if (start) {
814                                 long latency = ((long) (stop - start)) /
815                                     NSECS_PER_USECS;
816
817                                 latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
818                                     0, stop, NULL);
819                         }
820                         per_cpu(hist_preemptoff_counting, cpu) = 0;
821                 }
822 #endif
823
824 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
825                 if ((!per_cpu(hist_irqsoff_counting, cpu) ||
826                      !per_cpu(hist_preemptoff_counting, cpu)) &&
827                    per_cpu(hist_preemptirqsoff_counting, cpu)) {
828                         cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
829
830                         if (!time_set)
831                                 stop = ftrace_now(cpu);
832                         if (start) {
833                                 long latency = ((long) (stop - start)) /
834                                     NSECS_PER_USECS;
835
836                                 latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
837                                     latency, 0, stop, NULL);
838                         }
839                         per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
840                 }
841 #endif
842         }
843 }
844 #endif
845
846 #ifdef CONFIG_WAKEUP_LATENCY_HIST
847 static DEFINE_RAW_SPINLOCK(wakeup_lock);
848 static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
849         int cpu)
850 {
851         int old_cpu = task_cpu(task);
852
853         if (cpu != old_cpu) {
854                 unsigned long flags;
855                 struct task_struct *cpu_wakeup_task;
856
857                 raw_spin_lock_irqsave(&wakeup_lock, flags);
858
859                 cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
860                 if (task == cpu_wakeup_task) {
861                         put_task_struct(cpu_wakeup_task);
862                         per_cpu(wakeup_task, old_cpu) = NULL;
863                         cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
864                         get_task_struct(cpu_wakeup_task);
865                 }
866
867                 raw_spin_unlock_irqrestore(&wakeup_lock, flags);
868         }
869 }
870
871 static notrace void probe_wakeup_latency_hist_start(void *v,
872         struct task_struct *p, int success)
873 {
874         unsigned long flags;
875         struct task_struct *curr = current;
876         int cpu = task_cpu(p);
877         struct task_struct *cpu_wakeup_task;
878
879         raw_spin_lock_irqsave(&wakeup_lock, flags);
880
881         cpu_wakeup_task = per_cpu(wakeup_task, cpu);
882
883         if (wakeup_pid) {
884                 if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
885                     p->prio == curr->prio)
886                         per_cpu(wakeup_sharedprio, cpu) = 1;
887                 if (likely(wakeup_pid != task_pid_nr(p)))
888                         goto out;
889         } else {
890                 if (likely(!rt_task(p)) ||
891                     (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
892                     p->prio > curr->prio)
893                         goto out;
894                 if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
895                     p->prio == curr->prio)
896                         per_cpu(wakeup_sharedprio, cpu) = 1;
897         }
898
899         if (cpu_wakeup_task)
900                 put_task_struct(cpu_wakeup_task);
901         cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
902         get_task_struct(cpu_wakeup_task);
903         cpu_wakeup_task->preempt_timestamp_hist =
904                 ftrace_now(raw_smp_processor_id());
905 out:
906         raw_spin_unlock_irqrestore(&wakeup_lock, flags);
907 }
908
909 static notrace void probe_wakeup_latency_hist_stop(void *v,
910         struct task_struct *prev, struct task_struct *next)
911 {
912         unsigned long flags;
913         int cpu = task_cpu(next);
914         long latency;
915         cycle_t stop;
916         struct task_struct *cpu_wakeup_task;
917
918         raw_spin_lock_irqsave(&wakeup_lock, flags);
919
920         cpu_wakeup_task = per_cpu(wakeup_task, cpu);
921
922         if (cpu_wakeup_task == NULL)
923                 goto out;
924
925         /* Already running? */
926         if (unlikely(current == cpu_wakeup_task))
927                 goto out_reset;
928
929         if (next != cpu_wakeup_task) {
930                 if (next->prio < cpu_wakeup_task->prio)
931                         goto out_reset;
932
933                 if (next->prio == cpu_wakeup_task->prio)
934                         per_cpu(wakeup_sharedprio, cpu) = 1;
935
936                 goto out;
937         }
938
939         if (current->prio == cpu_wakeup_task->prio)
940                 per_cpu(wakeup_sharedprio, cpu) = 1;
941
942         /*
943          * The task we are waiting for is about to be switched to.
944          * Calculate latency and store it in histogram.
945          */
946         stop = ftrace_now(raw_smp_processor_id());
947
948         latency = ((long) (stop - next->preempt_timestamp_hist)) /
949             NSECS_PER_USECS;
950
951         if (per_cpu(wakeup_sharedprio, cpu)) {
952                 latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
953                     next);
954                 per_cpu(wakeup_sharedprio, cpu) = 0;
955         } else {
956                 latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
957 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
958                 if (timerandwakeup_enabled_data.enabled) {
959                         latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
960                             next->timer_offset + latency, next->timer_offset,
961                             stop, next);
962                 }
963 #endif
964         }
965
966 out_reset:
967 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
968         next->timer_offset = 0;
969 #endif
970         put_task_struct(cpu_wakeup_task);
971         per_cpu(wakeup_task, cpu) = NULL;
972 out:
973         raw_spin_unlock_irqrestore(&wakeup_lock, flags);
974 }
975 #endif
976
977 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
978 static notrace void probe_hrtimer_interrupt(void *v, int cpu,
979         long long latency_ns, struct task_struct *curr,
980         struct task_struct *task)
981 {
982         if (latency_ns <= 0 && task != NULL && rt_task(task) &&
983             (task->prio < curr->prio ||
984             (task->prio == curr->prio &&
985             !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
986                 long latency;
987                 cycle_t now;
988
989                 if (missed_timer_offsets_pid) {
990                         if (likely(missed_timer_offsets_pid !=
991                             task_pid_nr(task)))
992                                 return;
993                 }
994
995                 now = ftrace_now(cpu);
996                 latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
997                 latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
998                     task);
999 #ifdef CONFIG_WAKEUP_LATENCY_HIST
1000                 task->timer_offset = latency;
1001 #endif
1002         }
1003 }
1004 #endif
1005
1006 static __init int latency_hist_init(void)
1007 {
1008         struct dentry *latency_hist_root = NULL;
1009         struct dentry *dentry;
1010 #ifdef CONFIG_WAKEUP_LATENCY_HIST
1011         struct dentry *dentry_sharedprio;
1012 #endif
1013         struct dentry *entry;
1014         struct dentry *enable_root;
1015         int i = 0;
1016         struct hist_data *my_hist;
1017         char name[64];
1018         char *cpufmt = "CPU%d";
1019 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
1020         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
1021         char *cpufmt_maxlatproc = "max_latency-CPU%d";
1022         struct maxlatproc_data *mp = NULL;
1023 #endif
1024
1025         dentry = tracing_init_dentry();
1026         latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
1027         enable_root = debugfs_create_dir("enable", latency_hist_root);
1028
1029 #ifdef CONFIG_INTERRUPT_OFF_HIST
1030         dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
1031         for_each_possible_cpu(i) {
1032                 sprintf(name, cpufmt, i);
1033                 entry = debugfs_create_file(name, 0444, dentry,
1034                     &per_cpu(irqsoff_hist, i), &latency_hist_fops);
1035                 my_hist = &per_cpu(irqsoff_hist, i);
1036                 atomic_set(&my_hist->hist_mode, 1);
1037                 my_hist->min_lat = LONG_MAX;
1038         }
1039         entry = debugfs_create_file("reset", 0644, dentry,
1040             (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
1041 #endif
1042
1043 #ifdef CONFIG_PREEMPT_OFF_HIST
1044         dentry = debugfs_create_dir(preemptoff_hist_dir,
1045             latency_hist_root);
1046         for_each_possible_cpu(i) {
1047                 sprintf(name, cpufmt, i);
1048                 entry = debugfs_create_file(name, 0444, dentry,
1049                     &per_cpu(preemptoff_hist, i), &latency_hist_fops);
1050                 my_hist = &per_cpu(preemptoff_hist, i);
1051                 atomic_set(&my_hist->hist_mode, 1);
1052                 my_hist->min_lat = LONG_MAX;
1053         }
1054         entry = debugfs_create_file("reset", 0644, dentry,
1055             (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
1056 #endif
1057
1058 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
1059         dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
1060             latency_hist_root);
1061         for_each_possible_cpu(i) {
1062                 sprintf(name, cpufmt, i);
1063                 entry = debugfs_create_file(name, 0444, dentry,
1064                     &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
1065                 my_hist = &per_cpu(preemptirqsoff_hist, i);
1066                 atomic_set(&my_hist->hist_mode, 1);
1067                 my_hist->min_lat = LONG_MAX;
1068         }
1069         entry = debugfs_create_file("reset", 0644, dentry,
1070             (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
1071 #endif
1072
1073 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
1074         entry = debugfs_create_file("preemptirqsoff", 0644,
1075             enable_root, (void *)&preemptirqsoff_enabled_data,
1076             &enable_fops);
1077 #endif
1078
1079 #ifdef CONFIG_WAKEUP_LATENCY_HIST
1080         dentry = debugfs_create_dir(wakeup_latency_hist_dir,
1081             latency_hist_root);
1082         dentry_sharedprio = debugfs_create_dir(
1083             wakeup_latency_hist_dir_sharedprio, dentry);
1084         for_each_possible_cpu(i) {
1085                 sprintf(name, cpufmt, i);
1086
1087                 entry = debugfs_create_file(name, 0444, dentry,
1088                     &per_cpu(wakeup_latency_hist, i),
1089                     &latency_hist_fops);
1090                 my_hist = &per_cpu(wakeup_latency_hist, i);
1091                 atomic_set(&my_hist->hist_mode, 1);
1092                 my_hist->min_lat = LONG_MAX;
1093
1094                 entry = debugfs_create_file(name, 0444, dentry_sharedprio,
1095                     &per_cpu(wakeup_latency_hist_sharedprio, i),
1096                     &latency_hist_fops);
1097                 my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
1098                 atomic_set(&my_hist->hist_mode, 1);
1099                 my_hist->min_lat = LONG_MAX;
1100
1101                 sprintf(name, cpufmt_maxlatproc, i);
1102
1103                 mp = &per_cpu(wakeup_maxlatproc, i);
1104                 entry = debugfs_create_file(name, 0444, dentry, mp,
1105                     &maxlatproc_fops);
1106                 clear_maxlatprocdata(mp);
1107
1108                 mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
1109                 entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
1110                     &maxlatproc_fops);
1111                 clear_maxlatprocdata(mp);
1112         }
1113         entry = debugfs_create_file("pid", 0644, dentry,
1114             (void *)&wakeup_pid, &pid_fops);
1115         entry = debugfs_create_file("reset", 0644, dentry,
1116             (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
1117         entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
1118             (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
1119         entry = debugfs_create_file("wakeup", 0644,
1120             enable_root, (void *)&wakeup_latency_enabled_data,
1121             &enable_fops);
1122 #endif
1123
1124 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
1125         dentry = debugfs_create_dir(missed_timer_offsets_dir,
1126             latency_hist_root);
1127         for_each_possible_cpu(i) {
1128                 sprintf(name, cpufmt, i);
1129                 entry = debugfs_create_file(name, 0444, dentry,
1130                     &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
1131                 my_hist = &per_cpu(missed_timer_offsets, i);
1132                 atomic_set(&my_hist->hist_mode, 1);
1133                 my_hist->min_lat = LONG_MAX;
1134
1135                 sprintf(name, cpufmt_maxlatproc, i);
1136                 mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
1137                 entry = debugfs_create_file(name, 0444, dentry, mp,
1138                     &maxlatproc_fops);
1139                 clear_maxlatprocdata(mp);
1140         }
1141         entry = debugfs_create_file("pid", 0644, dentry,
1142             (void *)&missed_timer_offsets_pid, &pid_fops);
1143         entry = debugfs_create_file("reset", 0644, dentry,
1144             (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
1145         entry = debugfs_create_file("missed_timer_offsets", 0644,
1146             enable_root, (void *)&missed_timer_offsets_enabled_data,
1147             &enable_fops);
1148 #endif
1149
1150 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
1151         defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
1152         dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
1153             latency_hist_root);
1154         for_each_possible_cpu(i) {
1155                 sprintf(name, cpufmt, i);
1156                 entry = debugfs_create_file(name, 0444, dentry,
1157                     &per_cpu(timerandwakeup_latency_hist, i),
1158                     &latency_hist_fops);
1159                 my_hist = &per_cpu(timerandwakeup_latency_hist, i);
1160                 atomic_set(&my_hist->hist_mode, 1);
1161                 my_hist->min_lat = LONG_MAX;
1162
1163                 sprintf(name, cpufmt_maxlatproc, i);
1164                 mp = &per_cpu(timerandwakeup_maxlatproc, i);
1165                 entry = debugfs_create_file(name, 0444, dentry, mp,
1166                     &maxlatproc_fops);
1167                 clear_maxlatprocdata(mp);
1168         }
1169         entry = debugfs_create_file("reset", 0644, dentry,
1170             (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
1171         entry = debugfs_create_file("timerandwakeup", 0644,
1172             enable_root, (void *)&timerandwakeup_enabled_data,
1173             &enable_fops);
1174 #endif
1175         return 0;
1176 }
1177
1178 device_initcall(latency_hist_init);