[kvmfornfv.git] / kernel / tools / perf / tests / code-reading.c
#include <linux/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <ctype.h>
#include <string.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
#include "event.h"
#include "thread.h"

#include "tests.h"

#define BUFSZ   1024
#define READLEN 128

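/*
 * Test state: start addresses of kcore maps that have already been compared
 * against objdump, so that each kcore map is only tested once.
 */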
struct state {
	u64 done[1024];
	size_t done_cnt;
};

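/* Convert a single hex digit character to its numeric value */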
static unsigned int hex(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return c - 'A' + 10;
}

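/*
 * Parse one line of objdump disassembly: skip past the address and colon,
 * then copy the hex byte pairs that follow into *buf, advancing *buf and
 * decrementing *len for each byte stored.
 */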
static void read_objdump_line(const char *line, size_t line_len, void **buf,
			      size_t *len)
{
	const char *p;
	size_t i;

	/* Skip to a colon */
	p = strchr(line, ':');
	if (!p)
		return;
	i = p + 1 - line;

	/* Read bytes */
	while (*len) {
		char c1, c2;

		/* Skip spaces */
		for (; i < line_len; i++) {
			if (!isspace(line[i]))
				break;
		}
		/* Get 2 hex digits */
		if (i >= line_len || !isxdigit(line[i]))
			break;
		c1 = line[i++];
		if (i >= line_len || !isxdigit(line[i]))
			break;
		c2 = line[i++];
		/* Followed by a space */
		if (i < line_len && line[i] && !isspace(line[i]))
			break;
		/* Store byte */
		*(unsigned char *)*buf = (hex(c1) << 4) | hex(c2);
		*buf += 1;
		*len -= 1;
	}
}

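/*
 * Read objdump output line by line from the pipe, accumulating the decoded
 * instruction bytes into *buf until EOF is reached or *len bytes are stored.
 */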
static int read_objdump_output(FILE *f, void **buf, size_t *len)
{
	char *line = NULL;
	size_t line_len;
	ssize_t ret;
	int err = 0;

	while (1) {
		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}
		read_objdump_line(line, ret, buf, len);
	}

	free(line);

	return err;
}

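/*
 * Disassemble the range [addr, addr + len) of the given file with objdump
 * and capture the raw instruction bytes into buf.  Returns 0 if all bytes
 * were read, a positive count of missing bytes if objdump produced too
 * little output, or a negative value on error.
 */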
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	fmt = "%s -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
		       filename);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, &buf, &len);
	if (len) {
		pr_debug("objdump read too few bytes\n");
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}

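/*
 * Read the object code behind a sampled address twice - once through perf's
 * own dso__data_read_offset() and once by running objdump on the backing
 * file - and check that both reads return identical bytes.
 */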
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread, struct state *state)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ];
	unsigned char buf2[BUFSZ];
	size_t ret_len;
	u64 objdump_addr;
	int ret;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al);
	if (!al.map || !al.map->dso) {
		pr_debug("thread__find_addr_map failed\n");
		return -1;
	}

	pr_debug("File is: %s\n", al.map->dso->long_name);

	if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(al.map->dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		return 0;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > al.map->end)
		len = al.map->end - addr;

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		return -1;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that.  See map__rip_2objdump() for details.
	 */
	if (map__load(al.map, NULL))
		return -1;

	/* objdump struggles with kcore - try each map only once */
	if (dso__is_kcore(al.map->dso)) {
		size_t d;

		for (d = 0; d < state->done_cnt; d++) {
			if (state->done[d] == al.map->start) {
				pr_debug("kcore map tested already");
				pr_debug(" - skipping\n");
				return 0;
			}
		}
		if (state->done_cnt >= ARRAY_SIZE(state->done)) {
			pr_debug("Too many kcore maps - skipping\n");
			return 0;
		}
		state->done[state->done_cnt++] = al.map->start;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(al.map->dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
				return 0;
			} else {
				return -1;
			}
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		return -1;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		return -1;
	}
	pr_debug("Bytes read match those read by objdump\n");

	return 0;
}

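/*
 * For a PERF_RECORD_SAMPLE event, resolve the sampled thread and compare the
 * object code at the sample IP against what objdump reads.
 */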
static int process_sample_event(struct machine *machine,
				struct perf_evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	u8 cpumode;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	return read_object_code(sample.ip, READLEN, cpumode, thread, state);
}

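/*
 * Dispatch one mmapped event: samples go to process_sample_event(), throttle
 * events are ignored, and other known event types are fed to
 * machine__process_event() so the thread and map state stays up to date.
 */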
static int process_event(struct machine *machine, struct perf_evlist *evlist,
			 union perf_event *event, struct state *state)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(machine, evlist, event, state);

	if (event->header.type == PERF_RECORD_THROTTLE ||
	    event->header.type == PERF_RECORD_UNTHROTTLE)
		return 0;

	if (event->header.type < PERF_RECORD_MAX) {
		int ret;

		ret = machine__process_event(machine, event, NULL);
		if (ret < 0)
			pr_debug("machine__process_event failed, event type %u\n",
				 event->header.type);
		return ret;
	}

	return 0;
}

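/* Drain all events from the evlist's mmap buffers, processing each in turn */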
static int process_events(struct machine *machine, struct perf_evlist *evlist,
			  struct state *state)
{
	union perf_event *event;
	int i, ret;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			ret = process_event(machine, evlist, event, state);
			perf_evlist__mmap_consume(evlist, i);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

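/*
 * The functions below are the workload that generates samples while the
 * events are enabled: some user-space sorting, some system calls and some
 * file system activity, so both user and kernel code gets sampled.
 */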
static int comp(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
	int buf[40960], i;

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
		buf[i] = ARRAY_SIZE(buf) - i - 1;

	qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
		if (buf[i] != i) {
			pr_debug("qsort failed\n");
			break;
		}
	}
}

static void sort_something(void)
{
	int i;

	for (i = 0; i < 10; i++)
		do_sort_something();
}

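/* Generate kernel activity by repeatedly creating and closing a pipe */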
static void syscall_something(void)
{
	int pipefd[2];
	int i;

	for (i = 0; i < 1000; i++) {
		if (pipe(pipefd) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(pipefd[1]);
		close(pipefd[0]);
	}
}

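/* Generate file system activity by repeatedly creating and removing a file */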
static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	FILE *f;
	int i;

	for (i = 0; i < 1000; i++) {
		f = fopen(test_file_name, "w+");
		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}

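/* Run the full sample-generating workload */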
static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}

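/*
 * Result codes for do_test_code_reading().  The non-OK values are not
 * failures; they record why kernel object code could not be fully checked
 * and are reported by test__code_reading() as a pass with a note.
 */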
enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};

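/*
 * Open a cycles event on the current process, run a workload, then check
 * that the object code perf reads for each sampled address matches what
 * objdump reads.  When try_kcore is set, kallsyms/kcore is used for the
 * kernel instead of vmlinux.
 */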
static int do_test_code_reading(bool try_kcore)
{
	struct machines machines;
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages          = UINT_MAX,
		.user_freq           = UINT_MAX,
		.user_interval       = ULLONG_MAX,
		.freq                = 4000,
		.target              = {
			.uses_mmap   = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machines__init(&machines);
	machine = &machines.host;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine->vmlinux_maps[MAP__FUNCTION];
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine, false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_err;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_err;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_err;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}
			pr_debug("perf_evlist__open failed\n");
			goto out_err;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_err;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_err;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__delete(cpus);
		thread_map__delete(threads);
	}
	machines__destroy_kernel_maps(&machines);
	machine__delete_threads(machine);
	machines__exit(&machines);

	return err;
}

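/*
 * Test entry point: run the test against vmlinux first, then again against
 * kcore.  The "no ..." outcomes count as a pass with an explanatory note.
 */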
int test__code_reading(void)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		fprintf(stderr, " (no vmlinux)");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		fprintf(stderr, " (no kcore)");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		fprintf(stderr, " (no access)");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		fprintf(stderr, " (no kernel obj)");
		return 0;
	default:
		return -1;
	}
}