/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/rbtree.h>
#include <linux/list.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "thread-stack.h"
#define CALL_PATH_BLOCK_SHIFT 8
#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
struct call_path_block {
        struct call_path cp[CALL_PATH_BLOCK_SIZE];
        struct list_head node;
};
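/*
 * Illustrative sketch (not part of the original file): call paths are
 * allocated from CALL_PATH_BLOCK_SIZE-entry blocks, so a running counter
 * maps to a (block, slot) pair with a shift and a mask.  The helper below
 * is a hypothetical restatement of that arithmetic.
 */
static inline void call_path_block__locate(size_t next, size_t *block,
                                           size_t *slot)
{
        *block = next >> CALL_PATH_BLOCK_SHIFT; /* which block */
        *slot = next & CALL_PATH_BLOCK_MASK;    /* index within the block */
}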
/**
 * struct call_path_root - root of all call paths.
 * @call_path: root call path
 * @blocks: list of blocks to store call paths
 * @next: next free space
 * @sz: number of spaces
 */
struct call_path_root {
        struct call_path call_path;
        struct list_head blocks;
        size_t next;
        size_t sz;
};
/**
 * struct call_return_processor - provides a call-back to consume call-return
 *                                information.
 * @cpr: call path root
 * @process: call-back that accepts call/return information
 * @data: anonymous data for call-back
 */
struct call_return_processor {
        struct call_path_root *cpr;
        int (*process)(struct call_return *cr, void *data);
        void *data;
};
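/*
 * Example (an illustrative sketch, not from the original file): the shape
 * of a 'process' call-back.  The name example_process_cr, the FILE *
 * convention for 'data' and the <stdio.h> dependency are assumptions made
 * for the sake of the example.
 */
static int example_process_cr(struct call_return *cr, void *data)
{
        FILE *f = data;

        /* Branches executed between this call and its return */
        fprintf(f, "branch_count %llu\n",
                (unsigned long long)cr->branch_count);

        return 0; /* A non-zero return propagates as an error */
}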
#define STACK_GROWTH 2048
/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 */
struct thread_stack_entry {
        u64 ret_addr;
        u64 timestamp;
        u64 ref;
        u64 branch_count;
        struct call_path *cp;
        bool no_call;
};
/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 */
struct thread_stack {
        struct thread_stack_entry *stack;
        size_t cnt;
        size_t sz;
        u64 trace_nr;
        u64 branch_count;
        u64 kernel_start;
        u64 last_time;
        struct call_return_processor *crp;
        struct comm *comm;
};
static int thread_stack__grow(struct thread_stack *ts)
{
        struct thread_stack_entry *new_stack;
        size_t sz, new_sz;

        new_sz = ts->sz + STACK_GROWTH;
        sz = new_sz * sizeof(struct thread_stack_entry);

        new_stack = realloc(ts->stack, sz);
        if (!new_stack)
                return -ENOMEM;

        ts->stack = new_stack;
        ts->sz = new_sz;

        return 0;
}
static struct thread_stack *thread_stack__new(struct thread *thread,
                                              struct call_return_processor *crp)
{
        struct thread_stack *ts;

        ts = zalloc(sizeof(struct thread_stack));
        if (!ts)
                return NULL;

        if (thread_stack__grow(ts)) {
                free(ts);
                return NULL;
        }

        if (thread->mg && thread->mg->machine)
                ts->kernel_start = machine__kernel_start(thread->mg->machine);
        else
                ts->kernel_start = 1ULL << 63;
        ts->crp = crp;

        return ts;
}
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
{
        int err = 0;

        if (ts->cnt == ts->sz) {
                err = thread_stack__grow(ts);
                if (err) {
                        pr_warning("Out of memory: discarding thread stack\n");
                        ts->cnt = 0;
                }
        }

        ts->stack[ts->cnt++].ret_addr = ret_addr;

        return err;
}
static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
        size_t i;

        /*
         * In some cases there may be functions which are not seen to return.
         * For example, when setjmp / longjmp has been used, or when the perf
         * context switch in the kernel does not stop and start tracing in
         * exactly the same code path.  When that happens the return address
         * will be further down the stack.  If the return address is not found
         * at all, we assume the opposite (i.e. this is a return for a call
         * that wasn't seen for some reason) and leave the stack alone.
         */
        for (i = ts->cnt; i; ) {
                if (ts->stack[--i].ret_addr == ret_addr) {
                        ts->cnt = i;
                        return;
                }
        }
}
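/*
 * Worked example (added for illustration): with return addresses pushed in
 * the order A, B, C, the stack is [A, B, C] with C on top.
 * thread_stack__pop(ts, B) discards C and B, leaving [A];
 * thread_stack__pop(ts, X), where X is not on the stack, leaves [A, B, C]
 * untouched.
 */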
static bool thread_stack__in_kernel(struct thread_stack *ts)
{
        if (!ts->cnt)
                return false;

        return ts->stack[ts->cnt - 1].cp->in_kernel;
}
static int thread_stack__call_return(struct thread *thread,
                                     struct thread_stack *ts, size_t idx,
                                     u64 timestamp, u64 ref, bool no_return)
{
        struct call_return_processor *crp = ts->crp;
        struct thread_stack_entry *tse;
        struct call_return cr = {
                .thread = thread,
                .comm = ts->comm,
                .db_id = 0,
        };

        tse = &ts->stack[idx];
        cr.cp = tse->cp;
        cr.call_time = tse->timestamp;
        cr.return_time = timestamp;
        cr.branch_count = ts->branch_count - tse->branch_count;
        cr.call_ref = tse->ref;
        cr.return_ref = ref;
        if (tse->no_call)
                cr.flags |= CALL_RETURN_NO_CALL;
        if (no_return)
                cr.flags |= CALL_RETURN_NO_RETURN;

        return crp->process(&cr, crp->data);
}
static int thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
        struct call_return_processor *crp = ts->crp;
        int err;

        if (!crp) {
                ts->cnt = 0;
                return 0;
        }

        while (ts->cnt) {
                err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                ts->last_time, 0, true);
                if (err) {
                        pr_err("Error flushing thread stack!\n");
                        ts->cnt = 0;
                        return err;
                }
        }

        return 0;
}
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
                        u64 to_ip, u16 insn_len, u64 trace_nr)
{
        if (!thread)
                return -EINVAL;

        if (!thread->ts) {
                thread->ts = thread_stack__new(thread, NULL);
                if (!thread->ts) {
                        pr_warning("Out of memory: no thread stack\n");
                        return -ENOMEM;
                }
                thread->ts->trace_nr = trace_nr;
        }

        /*
         * When the trace is discontinuous, the trace_nr changes.  In that case
         * the stack might be completely invalid.  Better to report nothing
         * than to report something misleading, so flush the stack.
         */
        if (trace_nr != thread->ts->trace_nr) {
                if (thread->ts->trace_nr)
                        thread_stack__flush(thread, thread->ts);
                thread->ts->trace_nr = trace_nr;
        }

        /* Stop here if thread_stack__process() is in use */
        if (thread->ts->crp)
                return 0;

        if (flags & PERF_IP_FLAG_CALL) {
                u64 ret_addr;

                if (!to_ip)
                        return 0;
                ret_addr = from_ip + insn_len;
                if (ret_addr == to_ip)
                        return 0; /* Zero-length calls are excluded */
                return thread_stack__push(thread->ts, ret_addr);
        } else if (flags & PERF_IP_FLAG_RETURN) {
                if (!from_ip)
                        return 0;
                thread_stack__pop(thread->ts, to_ip);
        }

        return 0;
}
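/*
 * Usage sketch (illustrative, not part of the original file): a decoder
 * would call thread_stack__event() for every call/return branch.  The
 * function name, addresses, instruction lengths and trace number below are
 * made up.
 */
static void example_feed_branches(struct thread *thread)
{
        /* A 5-byte call at 0x1000 to 0x2000 pushes return address 0x1005 */
        thread_stack__event(thread, PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL,
                            0x1000, 0x2000, 5, 1);

        /* A return back to 0x1005 pops the matching entry */
        thread_stack__event(thread, PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN,
                            0x2040, 0x1005, 1, 1);
}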
void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
{
        if (!thread || !thread->ts)
                return;

        if (trace_nr != thread->ts->trace_nr) {
                if (thread->ts->trace_nr)
                        thread_stack__flush(thread, thread->ts);
                thread->ts->trace_nr = trace_nr;
        }
}
void thread_stack__free(struct thread *thread)
{
        if (thread->ts) {
                thread_stack__flush(thread, thread->ts);
                zfree(&thread->ts->stack);
                zfree(&thread->ts);
        }
}
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
                          size_t sz, u64 ip)
{
        size_t i;

        if (!thread || !thread->ts)
                chain->nr = 1;
        else
                chain->nr = min(sz, thread->ts->cnt + 1);

        chain->ips[0] = ip;

        for (i = 1; i < chain->nr; i++)
                chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
}
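/*
 * Usage sketch (illustrative, not part of the original file): entry 0 of
 * the synthesized callchain is the sample ip; the rest are the pushed
 * return addresses, innermost first.  The function name and the fixed
 * 64-entry buffer are assumptions made for the example.
 */
static void example_synthesize_callchain(struct thread *thread, u64 sample_ip)
{
        u64 buf[64 + 1]; /* room for chain->nr plus up to 64 ips */
        struct ip_callchain *chain = (struct ip_callchain *)buf;

        thread_stack__sample(thread, chain, 64, sample_ip);
}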
static void call_path__init(struct call_path *cp, struct call_path *parent,
                            struct symbol *sym, u64 ip, bool in_kernel)
{
        cp->parent = parent;
        cp->sym = sym;
        cp->ip = sym ? 0 : ip;
        cp->db_id = 0;
        cp->in_kernel = in_kernel;
        RB_CLEAR_NODE(&cp->rb_node);
        cp->children = RB_ROOT;
}
static struct call_path_root *call_path_root__new(void)
{
        struct call_path_root *cpr;

        cpr = zalloc(sizeof(struct call_path_root));
        if (!cpr)
                return NULL;
        call_path__init(&cpr->call_path, NULL, NULL, 0, false);
        INIT_LIST_HEAD(&cpr->blocks);
        return cpr;
}
static void call_path_root__free(struct call_path_root *cpr)
{
        struct call_path_block *pos, *n;

        list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
                list_del(&pos->node);
                free(pos);
        }
        free(cpr);
}
static struct call_path *call_path__new(struct call_path_root *cpr,
                                        struct call_path *parent,
                                        struct symbol *sym, u64 ip,
                                        bool in_kernel)
{
        struct call_path_block *cpb;
        struct call_path *cp;
        size_t n;

        if (cpr->next < cpr->sz) {
                cpb = list_last_entry(&cpr->blocks, struct call_path_block,
                                      node);
        } else {
                cpb = zalloc(sizeof(struct call_path_block));
                if (!cpb)
                        return NULL;
                list_add_tail(&cpb->node, &cpr->blocks);
                cpr->sz += CALL_PATH_BLOCK_SIZE;
        }

        n = cpr->next++ & CALL_PATH_BLOCK_MASK;
        cp = &cpb->cp[n];

        call_path__init(cp, parent, sym, ip, in_kernel);

        return cp;
}
static struct call_path *call_path__findnew(struct call_path_root *cpr,
                                            struct call_path *parent,
                                            struct symbol *sym, u64 ip, u64 ks)
{
        struct rb_node **p;
        struct rb_node *node_parent = NULL;
        struct call_path *cp;
        bool in_kernel = ip >= ks;

        if (sym)
                ip = 0;

        if (!parent)
                return call_path__new(cpr, parent, sym, ip, in_kernel);

        p = &parent->children.rb_node;
        while (*p != NULL) {
                node_parent = *p;
                cp = rb_entry(node_parent, struct call_path, rb_node);

                if (cp->sym == sym && cp->ip == ip)
                        return cp;

                if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        cp = call_path__new(cpr, parent, sym, ip, in_kernel);
        if (!cp)
                return NULL;

        rb_link_node(&cp->rb_node, node_parent, p);
        rb_insert_color(&cp->rb_node, &parent->children);

        return cp;
}
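/*
 * Illustration (not part of the original file): repeated look-ups with the
 * same (parent, sym, ip) return the same node, so the children tree
 * de-duplicates call paths across the whole session.  The function name,
 * the <assert.h> dependency and the ~0ULL kernel start are assumptions made
 * for the example.
 */
static void example_call_path_reuse(struct call_path_root *cpr,
                                    struct symbol *sym)
{
        struct call_path *a, *b;

        a = call_path__findnew(cpr, &cpr->call_path, sym, 0, ~0ULL);
        b = call_path__findnew(cpr, &cpr->call_path, sym, 0, ~0ULL);

        assert(a == b); /* the second look-up finds the node the first made */
}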
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
                           void *data)
{
        struct call_return_processor *crp;

        crp = zalloc(sizeof(struct call_return_processor));
        if (!crp)
                return NULL;
        crp->cpr = call_path_root__new();
        if (!crp->cpr)
                goto out_free;
        crp->process = process;
        crp->data = data;
        return crp;

out_free:
        free(crp);
        return NULL;
}
void call_return_processor__free(struct call_return_processor *crp)
{
        if (crp) {
                call_path_root__free(crp->cpr);
                free(crp);
        }
}
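/*
 * Lifecycle sketch (illustrative, not part of the original file): create a
 * processor around the hypothetical example_process_cr call-back from
 * above, pass it to thread_stack__process() for each sample, then free it.
 */
static void example_crp_lifecycle(FILE *f)
{
        struct call_return_processor *crp;

        crp = call_return_processor__new(example_process_cr, f);
        if (!crp)
                return;

        /* ... pass crp to thread_stack__process() for each sample ... */

        call_return_processor__free(crp);
}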
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
                                 u64 timestamp, u64 ref, struct call_path *cp,
                                 bool no_call)
{
        struct thread_stack_entry *tse;
        int err;

        if (ts->cnt == ts->sz) {
                err = thread_stack__grow(ts);
                if (err)
                        return err;
        }

        tse = &ts->stack[ts->cnt++];
        tse->ret_addr = ret_addr;
        tse->timestamp = timestamp;
        tse->ref = ref;
        tse->branch_count = ts->branch_count;
        tse->cp = cp;
        tse->no_call = no_call;

        return 0;
}
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
                                u64 ret_addr, u64 timestamp, u64 ref,
                                struct symbol *sym)
{
        int err;

        if (!ts->cnt)
                return 1;

        if (ts->cnt == 1) {
                struct thread_stack_entry *tse = &ts->stack[0];

                if (tse->cp->sym == sym)
                        return thread_stack__call_return(thread, ts, --ts->cnt,
                                                         timestamp, ref, false);
        }

        if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
                return thread_stack__call_return(thread, ts, --ts->cnt,
                                                 timestamp, ref, false);
        } else {
                size_t i = ts->cnt - 1;

                while (i--) {
                        if (ts->stack[i].ret_addr != ret_addr)
                                continue;
                        i += 1;
                        while (ts->cnt > i) {
                                err = thread_stack__call_return(thread, ts,
                                                                --ts->cnt,
                                                                timestamp, ref,
                                                                true);
                                if (err)
                                        return err;
                        }
                        return thread_stack__call_return(thread, ts, --ts->cnt,
                                                         timestamp, ref, false);
                }
        }

        return 1;
}
static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
                                struct perf_sample *sample,
                                struct addr_location *from_al,
                                struct addr_location *to_al, u64 ref)
{
        struct call_path_root *cpr = ts->crp->cpr;
        struct call_path *cp;
        struct symbol *sym;
        u64 ip;

        if (sample->ip) {
                ip = sample->ip;
                sym = from_al->sym;
        } else if (sample->addr) {
                ip = sample->addr;
                sym = to_al->sym;
        } else {
                return 0;
        }

        cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
                                ts->kernel_start);
        if (!cp)
                return -ENOMEM;

        return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
                                     true);
}
static int thread_stack__no_call_return(struct thread *thread,
                                        struct thread_stack *ts,
                                        struct perf_sample *sample,
                                        struct addr_location *from_al,
                                        struct addr_location *to_al, u64 ref)
{
        struct call_path_root *cpr = ts->crp->cpr;
        struct call_path *cp, *parent;
        u64 ks = ts->kernel_start;
        int err;

        if (sample->ip >= ks && sample->addr < ks) {
                /* Return to userspace, so pop all kernel addresses */
                while (thread_stack__in_kernel(ts)) {
                        err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                        sample->time, ref,
                                                        true);
                        if (err)
                                return err;
                }

                /* If the stack is empty, push the userspace address */
                if (!ts->cnt) {
                        cp = call_path__findnew(cpr, &cpr->call_path,
                                                to_al->sym, sample->addr,
                                                ts->kernel_start);
                        if (!cp)
                                return -ENOMEM;
                        return thread_stack__push_cp(ts, 0, sample->time, ref,
                                                     cp, true);
                }
        } else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
                /* Return to userspace, so pop all kernel addresses */
                while (thread_stack__in_kernel(ts)) {
                        err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                        sample->time, ref,
                                                        true);
                        if (err)
                                return err;
                }
        }

        if (ts->cnt)
                parent = ts->stack[ts->cnt - 1].cp;
        else
                parent = &cpr->call_path;

        /* This 'return' had no 'call', so push and pop top of stack */
        cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
                                ts->kernel_start);
        if (!cp)
                return -ENOMEM;

        err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
                                    true);
        if (err)
                return err;

        return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
                                    to_al->sym);
}
static int thread_stack__trace_begin(struct thread *thread,
                                     struct thread_stack *ts, u64 timestamp,
                                     u64 ref)
{
        struct thread_stack_entry *tse;
        int err;

        if (!ts->cnt)
                return 0;

        /* Pop trace end */
        tse = &ts->stack[ts->cnt - 1];
        if (tse->cp->sym == NULL && tse->cp->ip == 0) {
                err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                timestamp, ref, false);
                if (err)
                        return err;
        }

        return 0;
}
static int thread_stack__trace_end(struct thread_stack *ts,
                                   struct perf_sample *sample, u64 ref)
{
        struct call_path_root *cpr = ts->crp->cpr;
        struct call_path *cp;
        u64 ret_addr;

        /* No point having 'trace end' on the bottom of the stack */
        if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
                return 0;

        cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
                                ts->kernel_start);
        if (!cp)
                return -ENOMEM;

        ret_addr = sample->ip + sample->insn_len;

        return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
                                     false);
}
int thread_stack__process(struct thread *thread, struct comm *comm,
                          struct perf_sample *sample,
                          struct addr_location *from_al,
                          struct addr_location *to_al, u64 ref,
                          struct call_return_processor *crp)
{
        struct thread_stack *ts = thread->ts;
        int err = 0;

        if (ts) {
                if (!ts->crp) {
                        /* Supersede thread_stack__event() */
                        thread_stack__free(thread);
                        thread->ts = thread_stack__new(thread, crp);
                        if (!thread->ts)
                                return -ENOMEM;
                        ts = thread->ts;
                        ts->comm = comm;
                }
        } else {
                thread->ts = thread_stack__new(thread, crp);
                if (!thread->ts)
                        return -ENOMEM;
                ts = thread->ts;
                ts->comm = comm;
        }

        /* Flush stack on exec */
        if (ts->comm != comm && thread->pid_ == thread->tid) {
                err = thread_stack__flush(thread, ts);
                if (err)
                        return err;
                ts->comm = comm;
        }

        /* If the stack is empty, put the current symbol on the stack */
        if (!ts->cnt) {
                err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
                                           ref);
                if (err)
                        return err;
        }

        ts->branch_count += 1;
        ts->last_time = sample->time;

        if (sample->flags & PERF_IP_FLAG_CALL) {
                struct call_path_root *cpr = ts->crp->cpr;
                struct call_path *cp;
                u64 ret_addr;

                if (!sample->ip || !sample->addr)
                        return 0;

                ret_addr = sample->ip + sample->insn_len;
                if (ret_addr == sample->addr)
                        return 0; /* Zero-length calls are excluded */

                cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
                                        to_al->sym, sample->addr,
                                        ts->kernel_start);
                if (!cp)
                        return -ENOMEM;
                err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
                                            cp, false);
        } else if (sample->flags & PERF_IP_FLAG_RETURN) {
                if (!sample->ip || !sample->addr)
                        return 0;

                err = thread_stack__pop_cp(thread, ts, sample->addr,
                                           sample->time, ref, from_al->sym);
                if (err) {
                        if (err < 0)
                                err = 0;
                        else
                                err = thread_stack__no_call_return(thread, ts,
                                                                   sample,
                                                                   from_al,
                                                                   to_al, ref);
                }
        } else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
                err = thread_stack__trace_begin(thread, ts, sample->time, ref);
        } else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
                err = thread_stack__trace_end(ts, sample, ref);
        }

        return err;
}
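/*
 * Summary (added for illustration): this file supports two modes of
 * operation.  Mode 1: feed branches with thread_stack__event() and
 * synthesize callchains with thread_stack__sample(); only return addresses
 * are kept.  Mode 2: pass a call_return_processor to thread_stack__process()
 * and receive every matched call/return pair, with timestamps and branch
 * counts, through the registered call-back.  Once used for a thread,
 * mode 2 supersedes mode 1.
 */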