kernel/tools/perf/util/event.h
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>

#include "../perf.h"
#include "map.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        char filename[PATH_MAX];
};

struct mmap2_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        u32 maj;
        u32 min;
        u64 ino;
        u64 ino_generation;
        u32 prot;
        u32 flags;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        u32 pid, tid;
        char comm[16];
};

struct fork_event {
        struct perf_event_header header;
        u32 pid, ppid;
        u32 tid, ptid;
        u64 time;
};

struct lost_event {
        struct perf_event_header header;
        u64 id;
        u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 value;
        u64 time_enabled;
        u64 time_running;
        u64 id;
};

struct throttle_event {
        struct perf_event_header header;
        u64 time;
        u64 id;
        u64 stream_id;
};

#define PERF_SAMPLE_MASK                                \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID |             \
         PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |          \
         PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |       \
         PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |         \
         PERF_SAMPLE_IDENTIFIER)

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
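
/*
 * Illustrative sketch, not part of the original header: a caller that
 * synthesizes sample records may want to bound the record size against
 * PERF_SAMPLE_MAX_SIZE before emitting it.  perf_event__sample_event_size()
 * is declared later in this file and, in this tree, its result starts from
 * sizeof(struct sample_event), so the header is already included; returning
 * -E2BIG (from <errno.h>) is only an assumed error convention.
 *
 *      size_t sz = perf_event__sample_event_size(sample, type, read_format);
 *
 *      if (sz > PERF_SAMPLE_MAX_SIZE)
 *              return -E2BIG;
 */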

struct sample_event {
        struct perf_event_header        header;
        u64 array[];
};

struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;

        /* Cached values/mask filled by first register access. */
        u64 cache_regs[PERF_REGS_MAX];
        u64 cache_mask;
};
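
/*
 * Illustrative sketch, not part of the original header: reading one register
 * out of a regs_dump.  perf_reg_value() comes from "perf_regs.h" (included
 * above); PERF_REG_IP is an arch-provided alias (e.g. PERF_REG_X86_IP) that
 * only exists with perf_regs support, and "sample" is assumed to be a parsed
 * struct perf_sample carrying PERF_SAMPLE_REGS_USER data.  printf and PRIx64
 * (<inttypes.h>) are used loosely for illustration.
 *
 *      u64 ip;
 *
 *      if (sample->user_regs.abi != PERF_SAMPLE_REGS_ABI_NONE &&
 *          perf_reg_value(&ip, &sample->user_regs, PERF_REG_IP) == 0)
 *              printf("user ip: %#" PRIx64 "\n", ip);
 */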

struct stack_dump {
        u16 offset;
        u64 size;
        char *data;
};

struct sample_read_value {
        u64 value;
        u64 id;
};

struct sample_read {
        u64 time_enabled;
        u64 time_running;
        union {
                struct {
                        u64 nr;
                        struct sample_read_value *values;
                } group;
                struct sample_read_value one;
        };
};
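
/*
 * Illustrative sketch, not part of the original header: which side of the
 * union above is valid depends on whether the event's read_format has
 * PERF_FORMAT_GROUP set.  "sample" and "read_format" are assumed to come
 * from the caller; printf is used loosely for illustration.
 *
 *      if (read_format & PERF_FORMAT_GROUP) {
 *              u64 i;
 *
 *              for (i = 0; i < sample->read.group.nr; i++) {
 *                      struct sample_read_value *v = &sample->read.group.values[i];
 *
 *                      printf("id %" PRIu64 " value %" PRIu64 "\n", v->id, v->value);
 *              }
 *      } else {
 *              printf("id %" PRIu64 " value %" PRIu64 "\n",
 *                     sample->read.one.id, sample->read.one.value);
 *      }
 */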

struct ip_callchain {
        u64 nr;
        u64 ips[0];
};
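
/*
 * Illustrative sketch, not part of the original header: walking the
 * callchain attached to a parsed sample.  Note that ips[] may also carry
 * PERF_CONTEXT_* marker values (kernel/user boundaries) between real
 * addresses.
 *
 *      u64 i;
 *
 *      for (i = 0; i < sample->callchain->nr; i++)
 *              printf("  %#" PRIx64 "\n", sample->callchain->ips[i]);
 */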

struct branch_flags {
        u64 mispred:1;
        u64 predicted:1;
        u64 in_tx:1;
        u64 abort:1;
        u64 reserved:60;
};

struct branch_entry {
        u64                     from;
        u64                     to;
        struct branch_flags     flags;
};

struct branch_stack {
        u64                     nr;
        struct branch_entry     entries[0];
};
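
/*
 * Illustrative sketch, not part of the original header: dumping the branch
 * stack of a parsed sample, flagging mispredicted branches.
 *
 *      u64 i;
 *
 *      for (i = 0; i < sample->branch_stack->nr; i++) {
 *              struct branch_entry *e = &sample->branch_stack->entries[i];
 *
 *              printf("%#" PRIx64 " -> %#" PRIx64 "%s\n", e->from, e->to,
 *                     e->flags.mispred ? " (mispredicted)" : "");
 *      }
 */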

enum {
        PERF_IP_FLAG_BRANCH             = 1ULL << 0,
        PERF_IP_FLAG_CALL               = 1ULL << 1,
        PERF_IP_FLAG_RETURN             = 1ULL << 2,
        PERF_IP_FLAG_CONDITIONAL        = 1ULL << 3,
        PERF_IP_FLAG_SYSCALLRET         = 1ULL << 4,
        PERF_IP_FLAG_ASYNC              = 1ULL << 5,
        PERF_IP_FLAG_INTERRUPT          = 1ULL << 6,
        PERF_IP_FLAG_TX_ABORT           = 1ULL << 7,
        PERF_IP_FLAG_TRACE_BEGIN        = 1ULL << 8,
        PERF_IP_FLAG_TRACE_END          = 1ULL << 9,
        PERF_IP_FLAG_IN_TX              = 1ULL << 10,
};

#define PERF_BRANCH_MASK                (\
        PERF_IP_FLAG_BRANCH             |\
        PERF_IP_FLAG_CALL               |\
        PERF_IP_FLAG_RETURN             |\
        PERF_IP_FLAG_CONDITIONAL        |\
        PERF_IP_FLAG_SYSCALLRET         |\
        PERF_IP_FLAG_ASYNC              |\
        PERF_IP_FLAG_INTERRUPT          |\
        PERF_IP_FLAG_TX_ABORT           |\
        PERF_IP_FLAG_TRACE_BEGIN        |\
        PERF_IP_FLAG_TRACE_END)
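
/*
 * Illustrative sketch, not part of the original header: these flags describe
 * the branch carried by a decoded sample (e.g. filled in by an instruction
 * trace decoder) and land in perf_sample.flags.  Note that
 * PERF_IP_FLAG_IN_TX is not part of PERF_BRANCH_MASK, so it is tested
 * against the raw flags.
 *
 *      u32 br = sample->flags & PERF_BRANCH_MASK;
 *
 *      if (br & PERF_IP_FLAG_CALL)
 *              puts("call");
 *      else if (br & PERF_IP_FLAG_RETURN)
 *              puts("return");
 *      if (sample->flags & PERF_IP_FLAG_IN_TX)
 *              puts("inside a transaction");
 */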

struct perf_sample {
        u64 ip;
        u32 pid, tid;
        u64 time;
        u64 addr;
        u64 id;
        u64 stream_id;
        u64 period;
        u64 weight;
        u64 transaction;
        u32 cpu;
        u32 raw_size;
        u64 data_src;
        u32 flags;
        u16 insn_len;
        void *raw_data;
        struct ip_callchain *callchain;
        struct branch_stack *branch_stack;
        struct regs_dump  user_regs;
        struct regs_dump  intr_regs;
        struct stack_dump user_stack;
        struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
        (PERF_MEM_S(OP, NA) |\
         PERF_MEM_S(LVL, NA) |\
         PERF_MEM_S(SNOOP, NA) |\
         PERF_MEM_S(LOCK, NA) |\
         PERF_MEM_S(TLB, NA))

struct build_id_event {
        struct perf_event_header header;
        pid_t                    pid;
        u8                       build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
        char                     filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_USER_TYPE_START             = 64,
        PERF_RECORD_HEADER_ATTR                 = 64,
        PERF_RECORD_HEADER_EVENT_TYPE           = 65, /* deprecated */
        PERF_RECORD_HEADER_TRACING_DATA         = 66,
        PERF_RECORD_HEADER_BUILD_ID             = 67,
        PERF_RECORD_FINISHED_ROUND              = 68,
        PERF_RECORD_ID_INDEX                    = 69,
        PERF_RECORD_HEADER_MAX
};
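
/*
 * Illustrative sketch, not part of the original header: user event types
 * start at 64 so that a processing loop can tell kernel-generated records
 * from records synthesized by the perf tool itself.  The two helpers named
 * below are hypothetical; only the header.type test is the point.
 *
 *      if (event->header.type < PERF_RECORD_USER_TYPE_START)
 *              err = deliver_kernel_record(event);
 *      else
 *              err = deliver_user_record(event);
 */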

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency isn't possible to
 * get the total number of low level events; it is necessary to sum all struct
 * sample_event.period and stash the result in total_period.
 */
struct events_stats {
        u64 total_period;
        u64 total_non_filtered_period;
        u64 total_lost;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
        u32 nr_lost_warned;
        u32 nr_unknown_events;
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
};
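
/*
 * Illustrative sketch, not part of the original header: how a processing
 * loop might feed these counters.  "stats", "event" and "sample" are assumed
 * to be provided by the caller; PERF_RECORD_LOST and PERF_RECORD_SAMPLE come
 * from the kernel perf_event ABI.
 *
 *      stats->nr_events[event->header.type]++;
 *
 *      if (event->header.type == PERF_RECORD_LOST)
 *              stats->total_lost += event->lost.lost;
 *      else if (event->header.type == PERF_RECORD_SAMPLE)
 *              stats->total_period += sample->period;
 */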

struct attr_event {
        struct perf_event_header header;
        struct perf_event_attr attr;
        u64 id[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
        u64     event_id;
        char    name[MAX_EVENT_NAME];
};

struct event_type_event {
        struct perf_event_header header;
        struct perf_trace_event_type event_type;
};

struct tracing_data_event {
        struct perf_event_header header;
        u32 size;
};

struct id_index_entry {
        u64 id;
        u64 idx;
        u64 cpu;
        u64 tid;
};

struct id_index_event {
        struct perf_event_header header;
        u64 nr;
        struct id_index_entry entries[0];
};
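
/*
 * Illustrative sketch, not part of the original header: walking the entries
 * of a PERF_RECORD_ID_INDEX record, which map each sample id to its buffer
 * index, cpu and tid.
 *
 *      u64 i;
 *
 *      for (i = 0; i < event->id_index.nr; i++) {
 *              struct id_index_entry *e = &event->id_index.entries[i];
 *
 *              printf("id %" PRIu64 ": idx %" PRIu64 " cpu %" PRIu64 " tid %" PRIu64 "\n",
 *                     e->id, e->idx, e->cpu, e->tid);
 *      }
 */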

union perf_event {
        struct perf_event_header        header;
        struct mmap_event               mmap;
        struct mmap2_event              mmap2;
        struct comm_event               comm;
        struct fork_event               fork;
        struct lost_event               lost;
        struct read_event               read;
        struct throttle_event           throttle;
        struct sample_event             sample;
        struct attr_event               attr;
        struct event_type_event         event_type;
        struct tracing_data_event       tracing_data;
        struct build_id_event           build_id;
        struct id_index_event           id_index;
};
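
/*
 * Illustrative sketch, not part of the original header: every record starts
 * with a perf_event_header, so a reader dispatches on header.type before
 * touching the type-specific members.  The perf_event__process_*() helpers
 * used here are declared below; "tool", "sample" and "machine" are assumed
 * to be supplied by the caller.
 *
 *      switch (event->header.type) {
 *      case PERF_RECORD_MMAP:
 *              return perf_event__process_mmap(tool, event, sample, machine);
 *      case PERF_RECORD_COMM:
 *              return perf_event__process_comm(tool, event, sample, machine);
 *      case PERF_RECORD_FORK:
 *              return perf_event__process_fork(tool, event, sample, machine);
 *      case PERF_RECORD_EXIT:
 *              return perf_event__process_exit(tool, event, sample, machine);
 *      case PERF_RECORD_LOST:
 *              return perf_event__process_lost(tool, event, sample, machine);
 *      default:
 *              return 0;
 *      }
 */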

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
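
/*
 * Illustrative sketch, not part of the original header: a minimal
 * perf_event__handler_t callback ("deliver_event" is a hypothetical name)
 * that forwards every synthesized record to the generic perf_event__process()
 * declared below, plus one possible way to drive it.
 *
 *      static int deliver_event(struct perf_tool *tool, union perf_event *event,
 *                               struct perf_sample *sample, struct machine *machine)
 *      {
 *              return perf_event__process(tool, event, sample, machine);
 *      }
 *
 *      err = perf_event__synthesize_threads(tool, deliver_event, machine, false);
 */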

int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine, bool mmap_data);
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine, bool mmap_data);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);

int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process(struct perf_tool *tool,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine);

struct addr_location;

int perf_event__preprocess_sample(const union perf_event *event,
                                  struct machine *machine,
                                  struct addr_location *al,
                                  struct perf_sample *sample);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void perf_event__preprocess_sample_addr(union perf_event *event,
                                        struct perf_sample *sample,
                                        struct thread *thread,
                                        struct addr_location *al);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                                     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  u64 read_format,
                                  const struct perf_sample *sample,
                                  bool swapped);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

u64 kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name);
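
/*
 * Illustrative sketch, not part of the original header: looking up where the
 * kernel text begins.  In this version the function returns 0 when the
 * symbol cannot be found, which is treated as failure here; pr_debug() comes
 * from util/debug.h.
 *
 *      u64 start = kallsyms__get_function_start("/proc/kallsyms", "_text");
 *
 *      if (!start)
 *              pr_debug("_text not found in /proc/kallsyms\n");
 */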

#endif /* __PERF_RECORD_H */