2 * kernel/trace/latency_hist.c
4 * Add support for histograms of preemption-off latency and
5 * interrupt-off latency and wakeup latency, it depends on
6 * Real-Time Preemption Support.
8 * Copyright (C) 2005 MontaVista Software, Inc.
9 * Yi Yang <yyang@ch.mvista.com>
11 * Converted to work with the new latency tracer.
12 * Copyright (C) 2008 Red Hat, Inc.
13 * Steven Rostedt <srostedt@redhat.com>
16 #include <linux/module.h>
17 #include <linux/debugfs.h>
18 #include <linux/seq_file.h>
19 #include <linux/percpu.h>
20 #include <linux/kallsyms.h>
21 #include <linux/uaccess.h>
22 #include <linux/sched.h>
23 #include <linux/sched/rt.h>
24 #include <linux/slab.h>
25 #include <linux/atomic.h>
26 #include <asm/div64.h>
29 #include <trace/events/sched.h>
31 #define NSECS_PER_USECS 1000L
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/hist.h>
39 PREEMPTIRQSOFF_LATENCY,
41 WAKEUP_LATENCY_SHAREDPRIO,
43 TIMERANDWAKEUP_LATENCY,
47 #define MAX_ENTRY_NUM 10240
50 atomic_t hist_mode; /* 0 log, 1 don't log */
51 long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
54 unsigned long long below_hist_bound_samples;
55 unsigned long long above_hist_bound_samples;
56 long long accumulate_lat;
57 unsigned long long total_samples;
58 unsigned long long hist_array[MAX_ENTRY_NUM];
66 static char *latency_hist_dir_root = "latency_hist";
68 #ifdef CONFIG_INTERRUPT_OFF_HIST
69 static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
70 static char *irqsoff_hist_dir = "irqsoff";
71 static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
72 static DEFINE_PER_CPU(int, hist_irqsoff_counting);
75 #ifdef CONFIG_PREEMPT_OFF_HIST
76 static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
77 static char *preemptoff_hist_dir = "preemptoff";
78 static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
79 static DEFINE_PER_CPU(int, hist_preemptoff_counting);
82 #if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
83 static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
84 static char *preemptirqsoff_hist_dir = "preemptirqsoff";
85 static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
86 static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
89 #if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
90 static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
91 static struct enable_data preemptirqsoff_enabled_data = {
92 .latency_type = PREEMPTIRQSOFF_LATENCY,
97 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
98 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
99 struct maxlatproc_data {
100 char comm[FIELD_SIZEOF(struct task_struct, comm)];
101 char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
112 #ifdef CONFIG_WAKEUP_LATENCY_HIST
113 static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
114 static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
115 static char *wakeup_latency_hist_dir = "wakeup";
116 static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
117 static notrace void probe_wakeup_latency_hist_start(void *v,
118 struct task_struct *p, int success);
119 static notrace void probe_wakeup_latency_hist_stop(void *v,
120 struct task_struct *prev, struct task_struct *next);
121 static notrace void probe_sched_migrate_task(void *,
122 struct task_struct *task, int cpu);
123 static struct enable_data wakeup_latency_enabled_data = {
124 .latency_type = WAKEUP_LATENCY,
127 static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
128 static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
129 static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
130 static DEFINE_PER_CPU(int, wakeup_sharedprio);
131 static unsigned long wakeup_pid;
134 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
135 static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
136 static char *missed_timer_offsets_dir = "missed_timer_offsets";
137 static notrace void probe_hrtimer_interrupt(void *v, int cpu,
138 long long offset, struct task_struct *curr, struct task_struct *task);
139 static struct enable_data missed_timer_offsets_enabled_data = {
140 .latency_type = MISSED_TIMER_OFFSETS,
143 static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
144 static unsigned long missed_timer_offsets_pid;
147 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
148 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
149 static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
150 static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
151 static struct enable_data timerandwakeup_enabled_data = {
152 .latency_type = TIMERANDWAKEUP_LATENCY,
155 static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
/*
 * Record one latency sample (in microseconds) into the per-CPU histogram
 * selected by @latency_type.  Also tracks min/max/average and, for the
 * wakeup/timer types, the details of the task that produced the new
 * maximum latency.  Called from the probe functions below.
 * NOTE(review): interior lines (braces, break/return statements) are
 * missing from this extraction; only comments have been added here.
 */
158 void notrace latency_hist(int latency_type, int cpu, long latency,
159 long timeroffset, cycle_t stop,
160 struct task_struct *p)
162 struct hist_data *my_hist;
163 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
164 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
165 struct maxlatproc_data *mp = NULL;
/* Reject impossible CPUs and out-of-range latency types. */
168 if (!cpu_possible(cpu) || latency_type < 0 ||
169 latency_type >= MAX_LATENCY_TYPE)
/* Select the per-CPU histogram (and maxlatproc record) for this type. */
172 switch (latency_type) {
173 #ifdef CONFIG_INTERRUPT_OFF_HIST
174 case IRQSOFF_LATENCY:
175 my_hist = &per_cpu(irqsoff_hist, cpu);
178 #ifdef CONFIG_PREEMPT_OFF_HIST
179 case PREEMPTOFF_LATENCY:
180 my_hist = &per_cpu(preemptoff_hist, cpu);
183 #if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
184 case PREEMPTIRQSOFF_LATENCY:
185 my_hist = &per_cpu(preemptirqsoff_hist, cpu);
188 #ifdef CONFIG_WAKEUP_LATENCY_HIST
190 my_hist = &per_cpu(wakeup_latency_hist, cpu);
191 mp = &per_cpu(wakeup_maxlatproc, cpu);
193 case WAKEUP_LATENCY_SHAREDPRIO:
194 my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
195 mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
198 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
199 case MISSED_TIMER_OFFSETS:
200 my_hist = &per_cpu(missed_timer_offsets, cpu);
201 mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
204 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
205 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
206 case TIMERANDWAKEUP_LATENCY:
207 my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
208 mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
/* Bias by the offset so bipolar scales map onto array indices. */
216 latency += my_hist->offset;
/* hist_mode == 0 means logging is suspended (reader/reset active). */
218 if (atomic_read(&my_hist->hist_mode) == 0)
/* Out-of-bounds samples are only counted, not stored in the array. */
221 if (latency < 0 || latency >= MAX_ENTRY_NUM) {
223 my_hist->below_hist_bound_samples++;
225 my_hist->above_hist_bound_samples++;
227 my_hist->hist_array[latency]++;
/* New maximum (or very first sample): also capture task details. */
229 if (unlikely(latency > my_hist->max_lat ||
230 my_hist->min_lat == LONG_MAX)) {
231 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
232 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
233 if (latency_type == WAKEUP_LATENCY ||
234 latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
235 latency_type == MISSED_TIMER_OFFSETS ||
236 latency_type == TIMERANDWAKEUP_LATENCY) {
/* NOTE(review): strncpy() can leave comm unterminated on truncation. */
237 strncpy(mp->comm, p->comm, sizeof(mp->comm));
238 strncpy(mp->current_comm, current->comm,
239 sizeof(mp->current_comm));
240 mp->pid = task_pid_nr(p);
241 mp->current_pid = task_pid_nr(current);
243 mp->current_prio = current->prio;
244 mp->latency = latency;
245 mp->timeroffset = timeroffset;
246 mp->timestamp = stop;
249 my_hist->max_lat = latency;
251 if (unlikely(latency < my_hist->min_lat))
252 my_hist->min_lat = latency;
/* Accumulate totals used for the average shown by l_start(). */
253 my_hist->total_samples++;
254 my_hist->accumulate_lat += latency;
/*
 * Seq_file start: suspend histogram logging (atomic_dec of hist_mode),
 * print the summary header (min/avg/max, sample totals, out-of-range
 * counts) and allocate the iteration cursor.  Logging is resumed by
 * l_next() once the whole array has been emitted.
 * NOTE(review): interior lines (braces, else, return paths) are missing
 * from this extraction; only comments have been added here.
 */
257 static void *l_start(struct seq_file *m, loff_t *pos)
259 loff_t *index_ptr = NULL;
261 struct hist_data *my_hist = m->private;
264 char minstr[32], avgstr[32], maxstr[32];
/* Stop logging while the histogram is being read out. */
266 atomic_dec(&my_hist->hist_mode);
268 if (likely(my_hist->total_samples)) {
/* Signed 64-bit division: accumulate_lat may be negative. */
269 long avg = (long) div64_s64(my_hist->accumulate_lat,
270 my_hist->total_samples);
271 snprintf(minstr, sizeof(minstr), "%ld",
272 my_hist->min_lat - my_hist->offset);
273 snprintf(avgstr, sizeof(avgstr), "%ld",
274 avg - my_hist->offset);
275 snprintf(maxstr, sizeof(maxstr), "%ld",
276 my_hist->max_lat - my_hist->offset);
/* No samples recorded yet: report "<undef>" for all three values. */
278 strcpy(minstr, "<undef>");
279 strcpy(avgstr, minstr);
280 strcpy(maxstr, minstr);
283 seq_printf(m, "#Minimum latency: %s microseconds\n"
284 "#Average latency: %s microseconds\n"
285 "#Maximum latency: %s microseconds\n"
286 "#Total samples: %llu\n"
287 "#There are %llu samples lower than %ld"
289 "#There are %llu samples greater or equal"
290 " than %ld microseconds.\n"
292 minstr, avgstr, maxstr,
293 my_hist->total_samples,
294 my_hist->below_hist_bound_samples,
296 my_hist->above_hist_bound_samples,
297 MAX_ENTRY_NUM - my_hist->offset,
/* Allocate the loff_t cursor; presumably freed in l_stop(). */
300 if (index < MAX_ENTRY_NUM) {
301 index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
309 static void *l_next(struct seq_file *m, void *p, loff_t *pos)
311 loff_t *index_ptr = p;
312 struct hist_data *my_hist = m->private;
314 if (++*pos >= MAX_ENTRY_NUM) {
315 atomic_inc(&my_hist->hist_mode);
/*
 * Seq_file stop callback.  NOTE(review): the body is not visible in this
 * extraction — presumably it frees the cursor allocated by l_start();
 * confirm against the full source.
 */
322 static void l_stop(struct seq_file *m, void *p)
327 static int l_show(struct seq_file *m, void *p)
329 int index = *(loff_t *) p;
330 struct hist_data *my_hist = m->private;
332 seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
333 my_hist->hist_array[index]);
337 static const struct seq_operations latency_hist_seq_op = {
344 static int latency_hist_open(struct inode *inode, struct file *file)
348 ret = seq_open(file, &latency_hist_seq_op);
350 struct seq_file *seq = file->private_data;
351 seq->private = inode->i_private;
356 static const struct file_operations latency_hist_fops = {
357 .open = latency_hist_open,
360 .release = seq_release,
363 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
364 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
365 static void clear_maxlatprocdata(struct maxlatproc_data *mp)
367 mp->comm[0] = mp->current_comm[0] = '\0';
368 mp->prio = mp->current_prio = mp->pid = mp->current_pid =
369 mp->latency = mp->timeroffset = -1;
374 static void hist_reset(struct hist_data *hist)
376 atomic_dec(&hist->hist_mode);
378 memset(hist->hist_array, 0, sizeof(hist->hist_array));
379 hist->below_hist_bound_samples = 0ULL;
380 hist->above_hist_bound_samples = 0ULL;
381 hist->min_lat = LONG_MAX;
382 hist->max_lat = LONG_MIN;
383 hist->total_samples = 0ULL;
384 hist->accumulate_lat = 0LL;
386 atomic_inc(&hist->hist_mode);
/*
 * Write handler for the per-type "reset" debugfs files: clear the
 * histogram (and, where one exists, the max-latency process record) on
 * every online CPU.  The latency type is carried as a plain integer in
 * file->private_data (set at debugfs_create_file time).
 * NOTE(review): braces/break statements and the return are missing from
 * this extraction; only comments have been added here.
 */
390 latency_hist_reset(struct file *file, const char __user *a,
391 size_t size, loff_t *off)
394 struct hist_data *hist = NULL;
395 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
396 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
397 struct maxlatproc_data *mp = NULL;
/* private_data carries the latency type, not a pointer. */
399 off_t latency_type = (off_t) file->private_data;
401 for_each_online_cpu(cpu) {
/* Pick this CPU's histogram (and maxlatproc) for the given type. */
403 switch (latency_type) {
404 #ifdef CONFIG_PREEMPT_OFF_HIST
405 case PREEMPTOFF_LATENCY:
406 hist = &per_cpu(preemptoff_hist, cpu);
409 #ifdef CONFIG_INTERRUPT_OFF_HIST
410 case IRQSOFF_LATENCY:
411 hist = &per_cpu(irqsoff_hist, cpu);
414 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
415 case PREEMPTIRQSOFF_LATENCY:
416 hist = &per_cpu(preemptirqsoff_hist, cpu);
419 #ifdef CONFIG_WAKEUP_LATENCY_HIST
421 hist = &per_cpu(wakeup_latency_hist, cpu);
422 mp = &per_cpu(wakeup_maxlatproc, cpu);
424 case WAKEUP_LATENCY_SHAREDPRIO:
425 hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
426 mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
429 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
430 case MISSED_TIMER_OFFSETS:
431 hist = &per_cpu(missed_timer_offsets, cpu);
432 mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
435 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
436 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
437 case TIMERANDWAKEUP_LATENCY:
438 hist = &per_cpu(timerandwakeup_latency_hist, cpu);
439 mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
/* Only the wakeup/timer types have a maxlatproc record to clear. */
445 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
446 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
447 if (latency_type == WAKEUP_LATENCY ||
448 latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
449 latency_type == MISSED_TIMER_OFFSETS ||
450 latency_type == TIMERANDWAKEUP_LATENCY)
451 clear_maxlatprocdata(mp);
458 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
459 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
461 show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
465 unsigned long *this_pid = file->private_data;
467 r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
468 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
471 static ssize_t do_pid(struct file *file, const char __user *ubuf,
472 size_t cnt, loff_t *ppos)
476 unsigned long *this_pid = file->private_data;
478 if (cnt >= sizeof(buf))
481 if (copy_from_user(&buf, ubuf, cnt))
486 if (kstrtoul(buf, 10, &pid))
495 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
496 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
/*
 * Read handler for the "max_latency-CPUx" debugfs files: format the
 * recorded worst-case sample as
 *   "pid prio latency (timeroffset) comm <- cur_pid cur_prio cur_comm
 *    secs.usecs"
 * Priorities are printed inverted (MAX_RT_PRIO-1 - prio).
 * NOTE(review): interior lines (braces, kmalloc failure check, kfree,
 * return) are missing from this extraction; only comments added.
 */
498 show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
501 struct maxlatproc_data *mp = file->private_data;
/* Worst-case buffer size: two comm strings plus eight numeric fields. */
502 int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
503 unsigned long long t;
504 unsigned long usecs, secs;
/* pid == -1 is the clear_maxlatprocdata() state: nothing recorded. */
507 if (mp->pid == -1 || mp->current_pid == -1) {
509 return simple_read_from_buffer(ubuf, cnt, ppos, buf,
513 buf = kmalloc(strmaxlen, GFP_KERNEL);
/* Split the nanosecond timestamp into seconds and microseconds. */
517 t = ns2usecs(mp->timestamp);
518 usecs = do_div(t, USEC_PER_SEC);
519 secs = (unsigned long) t;
520 r = snprintf(buf, strmaxlen,
521 "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
522 MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
523 mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
525 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
532 show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
535 struct enable_data *ed = file->private_data;
538 r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
539 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
/*
 * Write handler for the "enable" debugfs files: parse 0/1 from user
 * space and register or unregister the tracepoint probes for this
 * latency type accordingly.  On a partial registration failure the
 * already-registered probes are rolled back.
 * NOTE(review): many interior lines (braces, breaks, returns, goto
 * labels) are missing from this extraction; only comments added.
 */
543 do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
547 struct enable_data *ed = file->private_data;
549 if (cnt >= sizeof(buf))
552 if (copy_from_user(&buf, ubuf, cnt))
557 if (kstrtoul(buf, 10, &enable))
/* Already in the requested state: nothing to do. */
560 if ((enable && ed->enabled) || (!enable && !ed->enabled))
/* Enable path: register the tracepoint probes for this type. */
566 switch (ed->latency_type) {
567 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
568 case PREEMPTIRQSOFF_LATENCY:
569 ret = register_trace_preemptirqsoff_hist(
570 probe_preemptirqsoff_hist, NULL);
572 pr_info("wakeup trace: Couldn't assign "
573 "probe_preemptirqsoff_hist "
574 "to trace_preemptirqsoff_hist\n");
579 #ifdef CONFIG_WAKEUP_LATENCY_HIST
/* Wakeup needs four probes; each failure unwinds the earlier ones. */
581 ret = register_trace_sched_wakeup(
582 probe_wakeup_latency_hist_start, NULL);
584 pr_info("wakeup trace: Couldn't assign "
585 "probe_wakeup_latency_hist_start "
586 "to trace_sched_wakeup\n");
589 ret = register_trace_sched_wakeup_new(
590 probe_wakeup_latency_hist_start, NULL);
592 pr_info("wakeup trace: Couldn't assign "
593 "probe_wakeup_latency_hist_start "
594 "to trace_sched_wakeup_new\n");
/* Roll back the sched_wakeup registration. */
595 unregister_trace_sched_wakeup(
596 probe_wakeup_latency_hist_start, NULL);
599 ret = register_trace_sched_switch(
600 probe_wakeup_latency_hist_stop, NULL);
602 pr_info("wakeup trace: Couldn't assign "
603 "probe_wakeup_latency_hist_stop "
604 "to trace_sched_switch\n");
605 unregister_trace_sched_wakeup(
606 probe_wakeup_latency_hist_start, NULL);
607 unregister_trace_sched_wakeup_new(
608 probe_wakeup_latency_hist_start, NULL);
611 ret = register_trace_sched_migrate_task(
612 probe_sched_migrate_task, NULL);
614 pr_info("wakeup trace: Couldn't assign "
615 "probe_sched_migrate_task "
616 "to trace_sched_migrate_task\n");
617 unregister_trace_sched_wakeup(
618 probe_wakeup_latency_hist_start, NULL);
619 unregister_trace_sched_wakeup_new(
620 probe_wakeup_latency_hist_start, NULL);
621 unregister_trace_sched_switch(
622 probe_wakeup_latency_hist_stop, NULL);
627 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
628 case MISSED_TIMER_OFFSETS:
629 ret = register_trace_hrtimer_interrupt(
630 probe_hrtimer_interrupt, NULL);
632 pr_info("wakeup trace: Couldn't assign "
633 "probe_hrtimer_interrupt "
634 "to trace_hrtimer_interrupt\n");
639 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
640 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
/* Combined type requires both component types already enabled. */
641 case TIMERANDWAKEUP_LATENCY:
642 if (!wakeup_latency_enabled_data.enabled ||
643 !missed_timer_offsets_enabled_data.enabled)
/* Disable path: unregister probes and clear per-CPU bookkeeping. */
651 switch (ed->latency_type) {
652 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
653 case PREEMPTIRQSOFF_LATENCY:
657 unregister_trace_preemptirqsoff_hist(
658 probe_preemptirqsoff_hist, NULL);
659 for_each_online_cpu(cpu) {
660 #ifdef CONFIG_INTERRUPT_OFF_HIST
661 per_cpu(hist_irqsoff_counting,
664 #ifdef CONFIG_PREEMPT_OFF_HIST
665 per_cpu(hist_preemptoff_counting,
668 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
669 per_cpu(hist_preemptirqsoff_counting,
676 #ifdef CONFIG_WAKEUP_LATENCY_HIST
681 unregister_trace_sched_wakeup(
682 probe_wakeup_latency_hist_start, NULL);
683 unregister_trace_sched_wakeup_new(
684 probe_wakeup_latency_hist_start, NULL);
685 unregister_trace_sched_switch(
686 probe_wakeup_latency_hist_stop, NULL);
687 unregister_trace_sched_migrate_task(
688 probe_sched_migrate_task, NULL);
690 for_each_online_cpu(cpu) {
691 per_cpu(wakeup_task, cpu) = NULL;
692 per_cpu(wakeup_sharedprio, cpu) = 0;
/* Disabling a component also disables the combined type. */
695 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
696 timerandwakeup_enabled_data.enabled = 0;
700 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
701 case MISSED_TIMER_OFFSETS:
702 unregister_trace_hrtimer_interrupt(
703 probe_hrtimer_interrupt, NULL);
704 #ifdef CONFIG_WAKEUP_LATENCY_HIST
705 timerandwakeup_enabled_data.enabled = 0;
713 ed->enabled = enable;
717 static const struct file_operations latency_hist_reset_fops = {
718 .open = tracing_open_generic,
719 .write = latency_hist_reset,
722 static const struct file_operations enable_fops = {
723 .open = tracing_open_generic,
728 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
729 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
730 static const struct file_operations pid_fops = {
731 .open = tracing_open_generic,
736 static const struct file_operations maxlatproc_fops = {
737 .open = tracing_open_generic,
738 .read = show_maxlatproc,
742 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
/*
 * Tracepoint probe for preempt/irq-off transitions.  On a TRACE_START /
 * *_OFF reason it latches a start timestamp for the matching per-CPU
 * section; on a TRACE_STOP / *_ON reason it computes the elapsed time,
 * converts it to microseconds and feeds it to latency_hist().  The
 * combined preemptirqsoff section starts when both irqs and preemption
 * are off and ends when either is re-enabled.
 * NOTE(review): interior lines (braces, the start/stop if-split, the
 * NSECS_PER_USECS divisors) are missing from this extraction; only
 * comments have been added here.
 */
743 static notrace void probe_preemptirqsoff_hist(void *v, int reason,
746 int cpu = raw_smp_processor_id();
750 cycle_t uninitialized_var(start);
/* Ignore spurious calls when neither preemption nor irqs are off. */
752 if (!preempt_count() && !irqs_disabled())
755 #ifdef CONFIG_INTERRUPT_OFF_HIST
/* Begin an irqs-off section (if not already counting on this CPU). */
756 if ((reason == IRQS_OFF || reason == TRACE_START) &&
757 !per_cpu(hist_irqsoff_counting, cpu)) {
758 per_cpu(hist_irqsoff_counting, cpu) = 1;
759 start = ftrace_now(cpu);
761 per_cpu(hist_irqsoff_start, cpu) = start;
765 #ifdef CONFIG_PREEMPT_OFF_HIST
/* Begin a preempt-off section. */
766 if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
767 !per_cpu(hist_preemptoff_counting, cpu)) {
768 per_cpu(hist_preemptoff_counting, cpu) = 1;
770 start = ftrace_now(cpu);
771 per_cpu(hist_preemptoff_start, cpu) = start;
775 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
/* Combined section starts once both individual sections are active. */
776 if (per_cpu(hist_irqsoff_counting, cpu) &&
777 per_cpu(hist_preemptoff_counting, cpu) &&
778 !per_cpu(hist_preemptirqsoff_counting, cpu)) {
779 per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
781 start = ftrace_now(cpu);
782 per_cpu(hist_preemptirqsoff_start, cpu) = start;
786 cycle_t uninitialized_var(stop);
788 #ifdef CONFIG_INTERRUPT_OFF_HIST
/* End of irqs-off section: record the elapsed latency. */
789 if ((reason == IRQS_ON || reason == TRACE_STOP) &&
790 per_cpu(hist_irqsoff_counting, cpu)) {
791 cycle_t start = per_cpu(hist_irqsoff_start, cpu);
793 stop = ftrace_now(cpu);
796 long latency = ((long) (stop - start)) /
799 latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
802 per_cpu(hist_irqsoff_counting, cpu) = 0;
806 #ifdef CONFIG_PREEMPT_OFF_HIST
/* End of preempt-off section. */
807 if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
808 per_cpu(hist_preemptoff_counting, cpu)) {
809 cycle_t start = per_cpu(hist_preemptoff_start, cpu);
812 stop = ftrace_now(cpu);
814 long latency = ((long) (stop - start)) /
817 latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
820 per_cpu(hist_preemptoff_counting, cpu) = 0;
824 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
/* Combined section ends when either component is re-enabled. */
825 if ((!per_cpu(hist_irqsoff_counting, cpu) ||
826 !per_cpu(hist_preemptoff_counting, cpu)) &&
827 per_cpu(hist_preemptirqsoff_counting, cpu)) {
828 cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
831 stop = ftrace_now(cpu);
833 long latency = ((long) (stop - start)) /
836 latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
837 latency, 0, stop, NULL);
839 per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
846 #ifdef CONFIG_WAKEUP_LATENCY_HIST
847 static DEFINE_RAW_SPINLOCK(wakeup_lock);
848 static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
851 int old_cpu = task_cpu(task);
853 if (cpu != old_cpu) {
855 struct task_struct *cpu_wakeup_task;
857 raw_spin_lock_irqsave(&wakeup_lock, flags);
859 cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
860 if (task == cpu_wakeup_task) {
861 put_task_struct(cpu_wakeup_task);
862 per_cpu(wakeup_task, old_cpu) = NULL;
863 cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
864 get_task_struct(cpu_wakeup_task);
867 raw_spin_unlock_irqrestore(&wakeup_lock, flags);
/*
 * Tracepoint probe on sched_wakeup/sched_wakeup_new: decide whether the
 * woken task @p becomes the tracked wakeup task on its CPU, applying the
 * optional pid filter, and timestamp the wakeup.  Tasks sharing a
 * priority with the current tracked task or with current are flagged so
 * their latency goes into the "sharedprio" histogram instead.
 * NOTE(review): braces, return paths and the filter/tracking split are
 * missing from this extraction; only comments have been added here.
 */
871 static notrace void probe_wakeup_latency_hist_start(void *v,
872 struct task_struct *p, int success)
875 struct task_struct *curr = current;
876 int cpu = task_cpu(p);
877 struct task_struct *cpu_wakeup_task;
879 raw_spin_lock_irqsave(&wakeup_lock, flags);
881 cpu_wakeup_task = per_cpu(wakeup_task, cpu);
/* pid-filter branch: still record shared-priority wakeups. */
884 if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
885 p->prio == curr->prio)
886 per_cpu(wakeup_sharedprio, cpu) = 1;
887 if (likely(wakeup_pid != task_pid_nr(p)))
/* Only track RT tasks that beat the currently tracked priority. */
890 if (likely(!rt_task(p)) ||
891 (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
892 p->prio > curr->prio)
894 if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
895 p->prio == curr->prio)
896 per_cpu(wakeup_sharedprio, cpu) = 1;
/* Replace the tracked task: drop old ref, take a ref on @p. */
900 put_task_struct(cpu_wakeup_task);
901 cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
902 get_task_struct(cpu_wakeup_task);
/* Timestamp the wakeup; used by the _stop probe to compute latency. */
903 cpu_wakeup_task->preempt_timestamp_hist =
904 ftrace_now(raw_smp_processor_id());
906 raw_spin_unlock_irqrestore(&wakeup_lock, flags);
/*
 * Tracepoint probe on sched_switch: when the tracked wakeup task is
 * finally switched in, compute the wakeup latency from the timestamp
 * taken in the _start probe and record it in the appropriate histogram
 * (sharedprio variant if flagged, plus the combined timer+wakeup one
 * when enabled).  The tracking reference is dropped afterwards.
 * NOTE(review): braces, goto/out labels and return paths are missing
 * from this extraction; only comments have been added here.
 */
909 static notrace void probe_wakeup_latency_hist_stop(void *v,
910 struct task_struct *prev, struct task_struct *next)
913 int cpu = task_cpu(next);
916 struct task_struct *cpu_wakeup_task;
918 raw_spin_lock_irqsave(&wakeup_lock, flags);
920 cpu_wakeup_task = per_cpu(wakeup_task, cpu);
/* No wakeup is being tracked on this CPU. */
922 if (cpu_wakeup_task == NULL)
925 /* Already running? */
926 if (unlikely(current == cpu_wakeup_task))
/* Someone else is switched in: only note priority sharing. */
929 if (next != cpu_wakeup_task) {
930 if (next->prio < cpu_wakeup_task->prio)
933 if (next->prio == cpu_wakeup_task->prio)
934 per_cpu(wakeup_sharedprio, cpu) = 1;
939 if (current->prio == cpu_wakeup_task->prio)
940 per_cpu(wakeup_sharedprio, cpu) = 1;
943 * The task we are waiting for is about to be switched to.
944 * Calculate latency and store it in histogram.
946 stop = ftrace_now(raw_smp_processor_id());
/* Convert the cycle delta to microseconds. */
948 latency = ((long) (stop - next->preempt_timestamp_hist)) /
951 if (per_cpu(wakeup_sharedprio, cpu)) {
952 latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
954 per_cpu(wakeup_sharedprio, cpu) = 0;
956 latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
957 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
/* Combined histogram: timer miss offset plus wakeup latency. */
958 if (timerandwakeup_enabled_data.enabled) {
959 latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
960 next->timer_offset + latency, next->timer_offset,
967 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
968 next->timer_offset = 0;
/* Done tracking: release the reference taken in the _start probe. */
970 put_task_struct(cpu_wakeup_task);
971 per_cpu(wakeup_task, cpu) = NULL;
973 raw_spin_unlock_irqrestore(&wakeup_lock, flags);
977 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
/*
 * Tracepoint probe on hrtimer_interrupt: when a timer expired late
 * (latency_ns <= 0 by convention here) and woke an RT task that should
 * preempt the current task, record how late it fired in the
 * missed_timer_offsets histogram, honoring the optional pid filter.
 * The offset is also stashed in the task for the combined
 * timer+wakeup histogram.
 * NOTE(review): braces, declarations of now/latency and return paths
 * are missing from this extraction; only comments have been added.
 */
978 static notrace void probe_hrtimer_interrupt(void *v, int cpu,
979 long long latency_ns, struct task_struct *curr,
980 struct task_struct *task)
/* Only count misses that affect an RT task due to run here. */
982 if (latency_ns <= 0 && task != NULL && rt_task(task) &&
983 (task->prio < curr->prio ||
984 (task->prio == curr->prio &&
985 !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
/* Optional pid filter, same convention as the wakeup histogram. */
989 if (missed_timer_offsets_pid) {
990 if (likely(missed_timer_offsets_pid !=
995 now = ftrace_now(cpu);
/* latency_ns is negative for a late timer; negate and scale to us. */
996 latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
997 latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
999 #ifdef CONFIG_WAKEUP_LATENCY_HIST
/* Saved for the TIMERANDWAKEUP histogram in the sched_switch probe. */
1000 task->timer_offset = latency;
/*
 * Module/initcall entry: build the debugfs tree under
 * <tracing>/latency_hist/ — one directory per configured latency type,
 * containing a per-CPU histogram file, a "reset" file, and (for the
 * wakeup/timer types) per-CPU "max_latency-CPUx" and "pid" files —
 * plus an "enable" directory with one switch file per type.
 * Each per-CPU histogram is initialized to logging mode with
 * min_lat = LONG_MAX so the first sample sets it.
 * NOTE(review): braces, the declarations of i/name/ret and the final
 * return are missing from this extraction; only comments added.
 */
1006 static __init int latency_hist_init(void)
1008 struct dentry *latency_hist_root = NULL;
1009 struct dentry *dentry;
1010 #ifdef CONFIG_WAKEUP_LATENCY_HIST
1011 struct dentry *dentry_sharedprio;
1013 struct dentry *entry;
1014 struct dentry *enable_root;
1016 struct hist_data *my_hist;
1018 char *cpufmt = "CPU%d";
1019 #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
1020 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
1021 char *cpufmt_maxlatproc = "max_latency-CPU%d";
1022 struct maxlatproc_data *mp = NULL;
/* Root directories: latency_hist/ and latency_hist/enable/. */
1025 dentry = tracing_init_dentry();
1026 latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
1027 enable_root = debugfs_create_dir("enable", latency_hist_root);
1029 #ifdef CONFIG_INTERRUPT_OFF_HIST
/* irqsoff: per-CPU histogram files plus a reset file. */
1030 dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
1031 for_each_possible_cpu(i) {
1032 sprintf(name, cpufmt, i);
1033 entry = debugfs_create_file(name, 0444, dentry,
1034 &per_cpu(irqsoff_hist, i), &latency_hist_fops);
1035 my_hist = &per_cpu(irqsoff_hist, i);
1036 atomic_set(&my_hist->hist_mode, 1);
1037 my_hist->min_lat = LONG_MAX;
1039 entry = debugfs_create_file("reset", 0644, dentry,
1040 (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
1043 #ifdef CONFIG_PREEMPT_OFF_HIST
/* preemptoff: same layout as irqsoff. */
1044 dentry = debugfs_create_dir(preemptoff_hist_dir,
1046 for_each_possible_cpu(i) {
1047 sprintf(name, cpufmt, i);
1048 entry = debugfs_create_file(name, 0444, dentry,
1049 &per_cpu(preemptoff_hist, i), &latency_hist_fops);
1050 my_hist = &per_cpu(preemptoff_hist, i);
1051 atomic_set(&my_hist->hist_mode, 1);
1052 my_hist->min_lat = LONG_MAX;
1054 entry = debugfs_create_file("reset", 0644, dentry,
1055 (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
1058 #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
/* Combined preemptirqsoff histogram. */
1059 dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
1061 for_each_possible_cpu(i) {
1062 sprintf(name, cpufmt, i);
1063 entry = debugfs_create_file(name, 0444, dentry,
1064 &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
1065 my_hist = &per_cpu(preemptirqsoff_hist, i);
1066 atomic_set(&my_hist->hist_mode, 1);
1067 my_hist->min_lat = LONG_MAX;
1069 entry = debugfs_create_file("reset", 0644, dentry,
1070 (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
1073 #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
/* One enable switch covers all three off-latency histograms. */
1074 entry = debugfs_create_file("preemptirqsoff", 0644,
1075 enable_root, (void *)&preemptirqsoff_enabled_data,
1079 #ifdef CONFIG_WAKEUP_LATENCY_HIST
/* wakeup: regular and sharedprio histograms, maxlatproc and pid files. */
1080 dentry = debugfs_create_dir(wakeup_latency_hist_dir,
1082 dentry_sharedprio = debugfs_create_dir(
1083 wakeup_latency_hist_dir_sharedprio, dentry);
1084 for_each_possible_cpu(i) {
1085 sprintf(name, cpufmt, i);
1087 entry = debugfs_create_file(name, 0444, dentry,
1088 &per_cpu(wakeup_latency_hist, i),
1089 &latency_hist_fops);
1090 my_hist = &per_cpu(wakeup_latency_hist, i);
1091 atomic_set(&my_hist->hist_mode, 1);
1092 my_hist->min_lat = LONG_MAX;
1094 entry = debugfs_create_file(name, 0444, dentry_sharedprio,
1095 &per_cpu(wakeup_latency_hist_sharedprio, i),
1096 &latency_hist_fops);
1097 my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
1098 atomic_set(&my_hist->hist_mode, 1);
1099 my_hist->min_lat = LONG_MAX;
1101 sprintf(name, cpufmt_maxlatproc, i);
1103 mp = &per_cpu(wakeup_maxlatproc, i);
1104 entry = debugfs_create_file(name, 0444, dentry, mp,
1106 clear_maxlatprocdata(mp);
1108 mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
1109 entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
1111 clear_maxlatprocdata(mp);
1113 entry = debugfs_create_file("pid", 0644, dentry,
1114 (void *)&wakeup_pid, &pid_fops);
1115 entry = debugfs_create_file("reset", 0644, dentry,
1116 (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
1117 entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
1118 (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
1119 entry = debugfs_create_file("wakeup", 0644,
1120 enable_root, (void *)&wakeup_latency_enabled_data,
1124 #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
/* missed_timer_offsets: histogram, maxlatproc and pid-filter files. */
1125 dentry = debugfs_create_dir(missed_timer_offsets_dir,
1127 for_each_possible_cpu(i) {
1128 sprintf(name, cpufmt, i);
1129 entry = debugfs_create_file(name, 0444, dentry,
1130 &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
1131 my_hist = &per_cpu(missed_timer_offsets, i);
1132 atomic_set(&my_hist->hist_mode, 1);
1133 my_hist->min_lat = LONG_MAX;
1135 sprintf(name, cpufmt_maxlatproc, i);
1136 mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
1137 entry = debugfs_create_file(name, 0444, dentry, mp,
1139 clear_maxlatprocdata(mp);
1141 entry = debugfs_create_file("pid", 0644, dentry,
1142 (void *)&missed_timer_offsets_pid, &pid_fops);
1143 entry = debugfs_create_file("reset", 0644, dentry,
1144 (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
1145 entry = debugfs_create_file("missed_timer_offsets", 0644,
1146 enable_root, (void *)&missed_timer_offsets_enabled_data,
1150 #if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
1151 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
/* Combined timer+wakeup histogram (no pid filter of its own). */
1152 dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
1154 for_each_possible_cpu(i) {
1155 sprintf(name, cpufmt, i);
1156 entry = debugfs_create_file(name, 0444, dentry,
1157 &per_cpu(timerandwakeup_latency_hist, i),
1158 &latency_hist_fops);
1159 my_hist = &per_cpu(timerandwakeup_latency_hist, i);
1160 atomic_set(&my_hist->hist_mode, 1);
1161 my_hist->min_lat = LONG_MAX;
1163 sprintf(name, cpufmt_maxlatproc, i);
1164 mp = &per_cpu(timerandwakeup_maxlatproc, i);
1165 entry = debugfs_create_file(name, 0444, dentry, mp,
1167 clear_maxlatprocdata(mp);
1169 entry = debugfs_create_file("reset", 0644, dentry,
1170 (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
1171 entry = debugfs_create_file("timerandwakeup", 0644,
1172 enable_root, (void *)&timerandwakeup_enabled_data,
1178 device_initcall(latency_hist_init);