Add rt-linux 4.1.3-rt3 as the base
[kvmfornfv.git] / kernel / tools / perf / util / parse-events.c
1 #include <linux/hw_breakpoint.h>
2 #include "util.h"
3 #include "../perf.h"
4 #include "evlist.h"
5 #include "evsel.h"
6 #include "parse-options.h"
7 #include "parse-events.h"
8 #include "exec_cmd.h"
9 #include "string.h"
10 #include "symbol.h"
11 #include "cache.h"
12 #include "header.h"
13 #include "debug.h"
14 #include <api/fs/debugfs.h>
15 #include "parse-events-bison.h"
16 #define YY_EXTRA_TYPE int
17 #include "parse-events-flex.h"
18 #include "pmu.h"
19 #include "thread_map.h"
20
21 #define MAX_NAME_LEN 100
22
23 #ifdef PARSER_DEBUG
24 extern int parse_events_debug;
25 #endif
26 int parse_events_parse(void *data, void *scanner);
27
28 static struct perf_pmu_event_symbol *perf_pmu_events_list;
29 /*
30  * Number of supported PMU event symbols:
31  *  0 means not initialized and ready to init,
32  * -1 means init failed, don't try again,
33  * >0 is the number of supported PMU event symbols.
34  */
35 static int perf_pmu_events_list_num;
36
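/*
 * Tables mapping the generic PERF_COUNT_HW_* and PERF_COUNT_SW_* config
 * values to the event names (and optional aliases) accepted on the command
 * line, e.g. "cycles" for PERF_COUNT_HW_CPU_CYCLES or "faults" for
 * PERF_COUNT_SW_PAGE_FAULTS.
 */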
37 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
38         [PERF_COUNT_HW_CPU_CYCLES] = {
39                 .symbol = "cpu-cycles",
40                 .alias  = "cycles",
41         },
42         [PERF_COUNT_HW_INSTRUCTIONS] = {
43                 .symbol = "instructions",
44                 .alias  = "",
45         },
46         [PERF_COUNT_HW_CACHE_REFERENCES] = {
47                 .symbol = "cache-references",
48                 .alias  = "",
49         },
50         [PERF_COUNT_HW_CACHE_MISSES] = {
51                 .symbol = "cache-misses",
52                 .alias  = "",
53         },
54         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
55                 .symbol = "branch-instructions",
56                 .alias  = "branches",
57         },
58         [PERF_COUNT_HW_BRANCH_MISSES] = {
59                 .symbol = "branch-misses",
60                 .alias  = "",
61         },
62         [PERF_COUNT_HW_BUS_CYCLES] = {
63                 .symbol = "bus-cycles",
64                 .alias  = "",
65         },
66         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
67                 .symbol = "stalled-cycles-frontend",
68                 .alias  = "idle-cycles-frontend",
69         },
70         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
71                 .symbol = "stalled-cycles-backend",
72                 .alias  = "idle-cycles-backend",
73         },
74         [PERF_COUNT_HW_REF_CPU_CYCLES] = {
75                 .symbol = "ref-cycles",
76                 .alias  = "",
77         },
78 };
79
80 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
81         [PERF_COUNT_SW_CPU_CLOCK] = {
82                 .symbol = "cpu-clock",
83                 .alias  = "",
84         },
85         [PERF_COUNT_SW_TASK_CLOCK] = {
86                 .symbol = "task-clock",
87                 .alias  = "",
88         },
89         [PERF_COUNT_SW_PAGE_FAULTS] = {
90                 .symbol = "page-faults",
91                 .alias  = "faults",
92         },
93         [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
94                 .symbol = "context-switches",
95                 .alias  = "cs",
96         },
97         [PERF_COUNT_SW_CPU_MIGRATIONS] = {
98                 .symbol = "cpu-migrations",
99                 .alias  = "migrations",
100         },
101         [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
102                 .symbol = "minor-faults",
103                 .alias  = "",
104         },
105         [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
106                 .symbol = "major-faults",
107                 .alias  = "",
108         },
109         [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
110                 .symbol = "alignment-faults",
111                 .alias  = "",
112         },
113         [PERF_COUNT_SW_EMULATION_FAULTS] = {
114                 .symbol = "emulation-faults",
115                 .alias  = "",
116         },
117         [PERF_COUNT_SW_DUMMY] = {
118                 .symbol = "dummy",
119                 .alias  = "",
120         },
121 };
122
123 #define __PERF_EVENT_FIELD(config, name) \
124         ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
125
126 #define PERF_EVENT_RAW(config)          __PERF_EVENT_FIELD(config, RAW)
127 #define PERF_EVENT_CONFIG(config)       __PERF_EVENT_FIELD(config, CONFIG)
128 #define PERF_EVENT_TYPE(config)         __PERF_EVENT_FIELD(config, TYPE)
129 #define PERF_EVENT_ID(config)           __PERF_EVENT_FIELD(config, EVENT)
130
131 #define for_each_subsystem(sys_dir, sys_dirent, sys_next)              \
132         while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)        \
133         if (sys_dirent.d_type == DT_DIR &&                                     \
134            (strcmp(sys_dirent.d_name, ".")) &&                                 \
135            (strcmp(sys_dirent.d_name, "..")))
136
137 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
138 {
139         char evt_path[MAXPATHLEN];
140         int fd;
141
142         snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
143                         sys_dir->d_name, evt_dir->d_name);
144         fd = open(evt_path, O_RDONLY);
145         if (fd < 0)
146                 return -EINVAL;
147         close(fd);
148
149         return 0;
150 }
151
152 #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)              \
153         while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)        \
154         if (evt_dirent.d_type == DT_DIR &&                                     \
155            (strcmp(evt_dirent.d_name, ".")) &&                                 \
156            (strcmp(evt_dirent.d_name, "..")) &&                                \
157            (!tp_event_has_id(&sys_dirent, &evt_dirent)))
158
159 #define MAX_EVENT_LENGTH 512
160
161
162 struct tracepoint_path *tracepoint_id_to_path(u64 config)
163 {
164         struct tracepoint_path *path = NULL;
165         DIR *sys_dir, *evt_dir;
166         struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
167         char id_buf[24];
168         int fd;
169         u64 id;
170         char evt_path[MAXPATHLEN];
171         char dir_path[MAXPATHLEN];
172
173         sys_dir = opendir(tracing_events_path);
174         if (!sys_dir)
175                 return NULL;
176
177         for_each_subsystem(sys_dir, sys_dirent, sys_next) {
178
179                 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
180                          sys_dirent.d_name);
181                 evt_dir = opendir(dir_path);
182                 if (!evt_dir)
183                         continue;
184
185                 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
186
187                         snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
188                                  evt_dirent.d_name);
189                         fd = open(evt_path, O_RDONLY);
190                         if (fd < 0)
191                                 continue;
192                         if (read(fd, id_buf, sizeof(id_buf)) < 0) {
193                                 close(fd);
194                                 continue;
195                         }
196                         close(fd);
197                         id = atoll(id_buf);
198                         if (id == config) {
199                                 closedir(evt_dir);
200                                 closedir(sys_dir);
201                                 path = zalloc(sizeof(*path));
                                    if (!path)
                                            return NULL;
202                                 path->system = malloc(MAX_EVENT_LENGTH);
203                                 if (!path->system) {
204                                         free(path);
205                                         return NULL;
206                                 }
207                                 path->name = malloc(MAX_EVENT_LENGTH);
208                                 if (!path->name) {
209                                         zfree(&path->system);
210                                         free(path);
211                                         return NULL;
212                                 }
213                                 strncpy(path->system, sys_dirent.d_name,
214                                         MAX_EVENT_LENGTH);
215                                 strncpy(path->name, evt_dirent.d_name,
216                                         MAX_EVENT_LENGTH);
217                                 return path;
218                         }
219                 }
220                 closedir(evt_dir);
221         }
222
223         closedir(sys_dir);
224         return NULL;
225 }
226
227 struct tracepoint_path *tracepoint_name_to_path(const char *name)
228 {
229         struct tracepoint_path *path = zalloc(sizeof(*path));
230         char *str = strchr(name, ':');
231
232         if (path == NULL || str == NULL) {
233                 free(path);
234                 return NULL;
235         }
236
237         path->system = strndup(name, str - name);
238         path->name = strdup(str+1);
239
240         if (path->system == NULL || path->name == NULL) {
241                 zfree(&path->system);
242                 zfree(&path->name);
243                 free(path);
244                 path = NULL;
245         }
246
247         return path;
248 }
249
250 const char *event_type(int type)
251 {
252         switch (type) {
253         case PERF_TYPE_HARDWARE:
254                 return "hardware";
255
256         case PERF_TYPE_SOFTWARE:
257                 return "software";
258
259         case PERF_TYPE_TRACEPOINT:
260                 return "tracepoint";
261
262         case PERF_TYPE_HW_CACHE:
263                 return "hardware-cache";
264
265         default:
266                 break;
267         }
268
269         return "unknown";
270 }
271
272
273
274 static struct perf_evsel *
275 __add_event(struct list_head *list, int *idx,
276             struct perf_event_attr *attr,
277             char *name, struct cpu_map *cpus)
278 {
279         struct perf_evsel *evsel;
280
281         event_attr_init(attr);
282
283         evsel = perf_evsel__new_idx(attr, (*idx)++);
284         if (!evsel)
285                 return NULL;
286
287         evsel->cpus = cpus;
288         if (name)
289                 evsel->name = strdup(name);
290         list_add_tail(&evsel->node, list);
291         return evsel;
292 }
293
294 static int add_event(struct list_head *list, int *idx,
295                      struct perf_event_attr *attr, char *name)
296 {
297         return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM;
298 }
299
300 static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
301 {
302         int i, j;
303         int n, longest = -1;
304
305         for (i = 0; i < size; i++) {
306                 for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
307                         n = strlen(names[i][j]);
308                         if (n > longest && !strncasecmp(str, names[i][j], n))
309                                 longest = n;
310                 }
311                 if (longest > 0)
312                         return i;
313         }
314
315         return -1;
316 }
317
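/*
 * Generic cache events use the perf_event_attr ABI encoding applied below:
 *   config = cache_type | (cache_op << 8) | (cache_result << 16)
 * so, for example, "L1-dcache-load-misses" combines the L1D type, the READ
 * op and the MISS result.
 */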
318 int parse_events_add_cache(struct list_head *list, int *idx,
319                            char *type, char *op_result1, char *op_result2)
320 {
321         struct perf_event_attr attr;
322         char name[MAX_NAME_LEN];
323         int cache_type = -1, cache_op = -1, cache_result = -1;
324         char *op_result[2] = { op_result1, op_result2 };
325         int i, n;
326
327         /*
328          * No fallback - if we cannot get a clear cache type
329          * then bail out:
330          */
331         cache_type = parse_aliases(type, perf_evsel__hw_cache,
332                                    PERF_COUNT_HW_CACHE_MAX);
333         if (cache_type == -1)
334                 return -EINVAL;
335
336         n = snprintf(name, MAX_NAME_LEN, "%s", type);
337
338         for (i = 0; (i < 2) && (op_result[i]); i++) {
339                 char *str = op_result[i];
340
341                 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
342
343                 if (cache_op == -1) {
344                         cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
345                                                  PERF_COUNT_HW_CACHE_OP_MAX);
346                         if (cache_op >= 0) {
347                                 if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
348                                         return -EINVAL;
349                                 continue;
350                         }
351                 }
352
353                 if (cache_result == -1) {
354                         cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
355                                                      PERF_COUNT_HW_CACHE_RESULT_MAX);
356                         if (cache_result >= 0)
357                                 continue;
358                 }
359         }
360
361         /*
362          * Fall back to reads:
363          */
364         if (cache_op == -1)
365                 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
366
367         /*
368          * Fall back to accesses:
369          */
370         if (cache_result == -1)
371                 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
372
373         memset(&attr, 0, sizeof(attr));
374         attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
375         attr.type = PERF_TYPE_HW_CACHE;
376         return add_event(list, idx, &attr, name);
377 }
378
379 static int add_tracepoint(struct list_head *list, int *idx,
380                           char *sys_name, char *evt_name)
381 {
382         struct perf_evsel *evsel;
383
384         evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
385         if (!evsel)
386                 return -ENOMEM;
387
388         list_add_tail(&evsel->node, list);
389
390         return 0;
391 }
392
393 static int add_tracepoint_multi_event(struct list_head *list, int *idx,
394                                       char *sys_name, char *evt_name)
395 {
396         char evt_path[MAXPATHLEN];
397         struct dirent *evt_ent;
398         DIR *evt_dir;
399         int ret = 0;
400
401         snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
402         evt_dir = opendir(evt_path);
403         if (!evt_dir) {
404                 perror("Can't open event dir");
405                 return -1;
406         }
407
408         while (!ret && (evt_ent = readdir(evt_dir))) {
409                 if (!strcmp(evt_ent->d_name, ".")
410                     || !strcmp(evt_ent->d_name, "..")
411                     || !strcmp(evt_ent->d_name, "enable")
412                     || !strcmp(evt_ent->d_name, "filter"))
413                         continue;
414
415                 if (!strglobmatch(evt_ent->d_name, evt_name))
416                         continue;
417
418                 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
419         }
420
421         closedir(evt_dir);
422         return ret;
423 }
424
425 static int add_tracepoint_event(struct list_head *list, int *idx,
426                                 char *sys_name, char *evt_name)
427 {
428         return strpbrk(evt_name, "*?") ?
429                add_tracepoint_multi_event(list, idx, sys_name, evt_name) :
430                add_tracepoint(list, idx, sys_name, evt_name);
431 }
432
433 static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
434                                     char *sys_name, char *evt_name)
435 {
436         struct dirent *events_ent;
437         DIR *events_dir;
438         int ret = 0;
439
440         events_dir = opendir(tracing_events_path);
441         if (!events_dir) {
442                 perror("Can't open event dir");
443                 return -1;
444         }
445
446         while (!ret && (events_ent = readdir(events_dir))) {
447                 if (!strcmp(events_ent->d_name, ".")
448                     || !strcmp(events_ent->d_name, "..")
449                     || !strcmp(events_ent->d_name, "enable")
450                     || !strcmp(events_ent->d_name, "header_event")
451                     || !strcmp(events_ent->d_name, "header_page"))
452                         continue;
453
454                 if (!strglobmatch(events_ent->d_name, sys_name))
455                         continue;
456
457                 ret = add_tracepoint_event(list, idx, events_ent->d_name,
458                                            evt_name);
459         }
460
461         closedir(events_dir);
462         return ret;
463 }
464
465 int parse_events_add_tracepoint(struct list_head *list, int *idx,
466                                 char *sys, char *event)
467 {
468         if (strpbrk(sys, "*?"))
469                 return add_tracepoint_multi_sys(list, idx, sys, event);
470         else
471                 return add_tracepoint_event(list, idx, sys, event);
472 }
473
474 static int
475 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
476 {
477         int i;
478
479         for (i = 0; i < 3; i++) {
480                 if (!type || !type[i])
481                         break;
482
483 #define CHECK_SET_TYPE(bit)             \
484 do {                                    \
485         if (attr->bp_type & bit)        \
486                 return -EINVAL;         \
487         else                            \
488                 attr->bp_type |= bit;   \
489 } while (0)
490
491                 switch (type[i]) {
492                 case 'r':
493                         CHECK_SET_TYPE(HW_BREAKPOINT_R);
494                         break;
495                 case 'w':
496                         CHECK_SET_TYPE(HW_BREAKPOINT_W);
497                         break;
498                 case 'x':
499                         CHECK_SET_TYPE(HW_BREAKPOINT_X);
500                         break;
501                 default:
502                         return -EINVAL;
503                 }
504         }
505
506 #undef CHECK_SET_TYPE
507
508         if (!attr->bp_type) /* Default */
509                 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
510
511         return 0;
512 }
513
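/*
 * Breakpoint events come in as mem:<addr>[/len][:access] (see print_events()
 * below); the access string is any combination of 'r', 'w' and 'x' handled
 * by parse_breakpoint_type() above, and with no access given the breakpoint
 * defaults to read + write.
 */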
514 int parse_events_add_breakpoint(struct list_head *list, int *idx,
515                                 void *ptr, char *type, u64 len)
516 {
517         struct perf_event_attr attr;
518
519         memset(&attr, 0, sizeof(attr));
520         attr.bp_addr = (unsigned long) ptr;
521
522         if (parse_breakpoint_type(type, &attr))
523                 return -EINVAL;
524
525         /* Provide some defaults if len is not specified */
526         if (!len) {
527                 if (attr.bp_type == HW_BREAKPOINT_X)
528                         len = sizeof(long);
529                 else
530                         len = HW_BREAKPOINT_LEN_4;
531         }
532
533         attr.bp_len = len;
534
535         attr.type = PERF_TYPE_BREAKPOINT;
536         attr.sample_period = 1;
537
538         return add_event(list, idx, &attr, NULL);
539 }
540
541 static int config_term(struct perf_event_attr *attr,
542                        struct parse_events_term *term)
543 {
544 #define CHECK_TYPE_VAL(type)                                    \
545 do {                                                            \
546         if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
547                 return -EINVAL;                                 \
548 } while (0)
549
550         switch (term->type_term) {
551         case PARSE_EVENTS__TERM_TYPE_CONFIG:
552                 CHECK_TYPE_VAL(NUM);
553                 attr->config = term->val.num;
554                 break;
555         case PARSE_EVENTS__TERM_TYPE_CONFIG1:
556                 CHECK_TYPE_VAL(NUM);
557                 attr->config1 = term->val.num;
558                 break;
559         case PARSE_EVENTS__TERM_TYPE_CONFIG2:
560                 CHECK_TYPE_VAL(NUM);
561                 attr->config2 = term->val.num;
562                 break;
563         case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
564                 CHECK_TYPE_VAL(NUM);
565                 attr->sample_period = term->val.num;
566                 break;
567         case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
568                 /*
569                  * TODO uncomment when the field is available
570                  * attr->branch_sample_type = term->val.num;
571                  */
572                 break;
573         case PARSE_EVENTS__TERM_TYPE_NAME:
574                 CHECK_TYPE_VAL(STR);
575                 break;
576         default:
577                 return -EINVAL;
578         }
579
580         return 0;
581 #undef CHECK_TYPE_VAL
582 }
583
584 static int config_attr(struct perf_event_attr *attr,
585                        struct list_head *head, int fail)
586 {
587         struct parse_events_term *term;
588
589         list_for_each_entry(term, head, list)
590                 if (config_term(attr, term) && fail)
591                         return -EINVAL;
592
593         return 0;
594 }
595
596 int parse_events_add_numeric(struct list_head *list, int *idx,
597                              u32 type, u64 config,
598                              struct list_head *head_config)
599 {
600         struct perf_event_attr attr;
601
602         memset(&attr, 0, sizeof(attr));
603         attr.type = type;
604         attr.config = config;
605
606         if (head_config &&
607             config_attr(&attr, head_config, 1))
608                 return -EINVAL;
609
610         return add_event(list, idx, &attr, NULL);
611 }
612
613 static int parse_events__is_name_term(struct parse_events_term *term)
614 {
615         return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
616 }
617
618 static char *pmu_event_name(struct list_head *head_terms)
619 {
620         struct parse_events_term *term;
621
622         list_for_each_entry(term, head_terms, list)
623                 if (parse_events__is_name_term(term))
624                         return term->val.str;
625
626         return NULL;
627 }
628
629 int parse_events_add_pmu(struct list_head *list, int *idx,
630                          char *name, struct list_head *head_config)
631 {
632         struct perf_event_attr attr;
633         struct perf_pmu_info info;
634         struct perf_pmu *pmu;
635         struct perf_evsel *evsel;
636
637         pmu = perf_pmu__find(name);
638         if (!pmu)
639                 return -EINVAL;
640
641         if (pmu->default_config) {
642                 memcpy(&attr, pmu->default_config,
643                        sizeof(struct perf_event_attr));
644         } else {
645                 memset(&attr, 0, sizeof(attr));
646         }
647
648         if (!head_config) {
649                 attr.type = pmu->type;
650                 evsel = __add_event(list, idx, &attr, NULL, pmu->cpus);
651                 return evsel ? 0 : -ENOMEM;
652         }
653
654         if (perf_pmu__check_alias(pmu, head_config, &info))
655                 return -EINVAL;
656
657         /*
658          * Configure hardcoded terms first; no need to check the
659          * return value when called with fail == 0.
660          */
661         config_attr(&attr, head_config, 0);
662
663         if (perf_pmu__config(pmu, &attr, head_config))
664                 return -EINVAL;
665
666         evsel = __add_event(list, idx, &attr, pmu_event_name(head_config),
667                             pmu->cpus);
668         if (evsel) {
669                 evsel->unit = info.unit;
670                 evsel->scale = info.scale;
671                 evsel->per_pkg = info.per_pkg;
672                 evsel->snapshot = info.snapshot;
673         }
674
675         return evsel ? 0 : -ENOMEM;
676 }
677
678 int parse_events__modifier_group(struct list_head *list,
679                                  char *event_mod)
680 {
681         return parse_events__modifier_event(list, event_mod, true);
682 }
683
684 void parse_events__set_leader(char *name, struct list_head *list)
685 {
686         struct perf_evsel *leader;
687
688         __perf_evlist__set_leader(list);
689         leader = list_entry(list->next, struct perf_evsel, node);
690         leader->group_name = name ? strdup(name) : NULL;
691 }
692
693 /* list_event is assumed to point to malloc'ed memory */
694 void parse_events_update_lists(struct list_head *list_event,
695                                struct list_head *list_all)
696 {
697         /*
698          * Called for a single event definition. Update the
699          * 'all events' list, and reinit the 'single event'
700          * list for the next event definition.
701          */
702         list_splice_tail(list_event, list_all);
703         free(list_event);
704 }
705
706 struct event_modifier {
707         int eu;
708         int ek;
709         int eh;
710         int eH;
711         int eG;
712         int eI;
713         int precise;
714         int exclude_GH;
715         int sample_read;
716         int pinned;
717 };
718
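/*
 * Event modifier characters handled below:
 *   u - count only in user space      k - count only in kernel space
 *   h - count only in the hypervisor  H - count only on the host
 *   G - count only in the guest       I - exclude idle time
 *   p - bump the precise_ip level     S - enable sample_read
 *   D - pin the event (group leaders only, see parse_events__modifier_event)
 */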
719 static int get_event_modifier(struct event_modifier *mod, char *str,
720                                struct perf_evsel *evsel)
721 {
722         int eu = evsel ? evsel->attr.exclude_user : 0;
723         int ek = evsel ? evsel->attr.exclude_kernel : 0;
724         int eh = evsel ? evsel->attr.exclude_hv : 0;
725         int eH = evsel ? evsel->attr.exclude_host : 0;
726         int eG = evsel ? evsel->attr.exclude_guest : 0;
727         int eI = evsel ? evsel->attr.exclude_idle : 0;
728         int precise = evsel ? evsel->attr.precise_ip : 0;
729         int sample_read = 0;
730         int pinned = evsel ? evsel->attr.pinned : 0;
731
732         int exclude = eu | ek | eh;
733         int exclude_GH = evsel ? evsel->exclude_GH : 0;
734
735         memset(mod, 0, sizeof(*mod));
736
737         while (*str) {
738                 if (*str == 'u') {
739                         if (!exclude)
740                                 exclude = eu = ek = eh = 1;
741                         eu = 0;
742                 } else if (*str == 'k') {
743                         if (!exclude)
744                                 exclude = eu = ek = eh = 1;
745                         ek = 0;
746                 } else if (*str == 'h') {
747                         if (!exclude)
748                                 exclude = eu = ek = eh = 1;
749                         eh = 0;
750                 } else if (*str == 'G') {
751                         if (!exclude_GH)
752                                 exclude_GH = eG = eH = 1;
753                         eG = 0;
754                 } else if (*str == 'H') {
755                         if (!exclude_GH)
756                                 exclude_GH = eG = eH = 1;
757                         eH = 0;
758                 } else if (*str == 'I') {
759                         eI = 1;
760                 } else if (*str == 'p') {
761                         precise++;
762                         /* use of precise requires exclude_guest */
763                         if (!exclude_GH)
764                                 eG = 1;
765                 } else if (*str == 'S') {
766                         sample_read = 1;
767                 } else if (*str == 'D') {
768                         pinned = 1;
769                 } else
770                         break;
771
772                 ++str;
773         }
774
775         /*
776          * precise ip:
777          *
778          *  0 - SAMPLE_IP can have arbitrary skid
779          *  1 - SAMPLE_IP must have constant skid
780          *  2 - SAMPLE_IP requested to have 0 skid
781          *  3 - SAMPLE_IP must have 0 skid
782          *
783          *  See also PERF_RECORD_MISC_EXACT_IP
784          */
785         if (precise > 3)
786                 return -EINVAL;
787
788         mod->eu = eu;
789         mod->ek = ek;
790         mod->eh = eh;
791         mod->eH = eH;
792         mod->eG = eG;
793         mod->eI = eI;
794         mod->precise = precise;
795         mod->exclude_GH = exclude_GH;
796         mod->sample_read = sample_read;
797         mod->pinned = pinned;
798
799         return 0;
800 }
801
802 /*
803  * Basic modifier sanity check to validate that each modifier
804  * (apart from 'p') appears at most once.
805  */
806 static int check_modifier(char *str)
807 {
808         char *p = str;
809
810         /* The sizeof includes 0 byte as well. */
811         if (strlen(str) > (sizeof("ukhGHpppSDI") - 1))
812                 return -1;
813
814         while (*p) {
815                 if (*p != 'p' && strchr(p + 1, *p))
816                         return -1;
817                 p++;
818         }
819
820         return 0;
821 }
822
823 int parse_events__modifier_event(struct list_head *list, char *str, bool add)
824 {
825         struct perf_evsel *evsel;
826         struct event_modifier mod;
827
828         if (str == NULL)
829                 return 0;
830
831         if (check_modifier(str))
832                 return -EINVAL;
833
834         if (!add && get_event_modifier(&mod, str, NULL))
835                 return -EINVAL;
836
837         __evlist__for_each(list, evsel) {
838                 if (add && get_event_modifier(&mod, str, evsel))
839                         return -EINVAL;
840
841                 evsel->attr.exclude_user   = mod.eu;
842                 evsel->attr.exclude_kernel = mod.ek;
843                 evsel->attr.exclude_hv     = mod.eh;
844                 evsel->attr.precise_ip     = mod.precise;
845                 evsel->attr.exclude_host   = mod.eH;
846                 evsel->attr.exclude_guest  = mod.eG;
847                 evsel->attr.exclude_idle   = mod.eI;
848                 evsel->exclude_GH          = mod.exclude_GH;
849                 evsel->sample_read         = mod.sample_read;
850
851                 if (perf_evsel__is_group_leader(evsel))
852                         evsel->attr.pinned = mod.pinned;
853         }
854
855         return 0;
856 }
857
858 int parse_events_name(struct list_head *list, char *name)
859 {
860         struct perf_evsel *evsel;
861
862         __evlist__for_each(list, evsel) {
863                 if (!evsel->name)
864                         evsel->name = strdup(name);
865         }
866
867         return 0;
868 }
869
870 static int
871 comp_pmu(const void *p1, const void *p2)
872 {
873         struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
874         struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;
875
876         return strcmp(pmu1->symbol, pmu2->symbol);
877 }
878
879 static void perf_pmu__parse_cleanup(void)
880 {
881         if (perf_pmu_events_list_num > 0) {
882                 struct perf_pmu_event_symbol *p;
883                 int i;
884
885                 for (i = 0; i < perf_pmu_events_list_num; i++) {
886                         p = perf_pmu_events_list + i;
887                         free(p->symbol);
888                 }
889                 free(perf_pmu_events_list);
890                 perf_pmu_events_list = NULL;
891                 perf_pmu_events_list_num = 0;
892         }
893 }
894
895 #define SET_SYMBOL(str, stype)          \
896 do {                                    \
897         p->symbol = str;                \
898         if (!p->symbol)                 \
899                 goto err;               \
900         p->type = stype;                \
901 } while (0)
902
903 /*
904  * Read the PMU events list from sysfs and
905  * save it into perf_pmu_events_list.
906  */
907 static void perf_pmu__parse_init(void)
908 {
909
910         struct perf_pmu *pmu = NULL;
911         struct perf_pmu_alias *alias;
912         int len = 0;
913
914         pmu = perf_pmu__find("cpu");
915         if ((pmu == NULL) || list_empty(&pmu->aliases)) {
916                 perf_pmu_events_list_num = -1;
917                 return;
918         }
919         list_for_each_entry(alias, &pmu->aliases, list) {
920                 if (strchr(alias->name, '-'))
921                         len++;
922                 len++;
923         }
924         perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
925         if (!perf_pmu_events_list)
926                 return;
927         perf_pmu_events_list_num = len;
928
929         len = 0;
930         list_for_each_entry(alias, &pmu->aliases, list) {
931                 struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
932                 char *tmp = strchr(alias->name, '-');
933
934                 if (tmp != NULL) {
935                         SET_SYMBOL(strndup(alias->name, tmp - alias->name),
936                                         PMU_EVENT_SYMBOL_PREFIX);
937                         p++;
938                         SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
939                         len += 2;
940                 } else {
941                         SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
942                         len++;
943                 }
944         }
945         qsort(perf_pmu_events_list, len,
946                 sizeof(struct perf_pmu_event_symbol), comp_pmu);
947
948         return;
949 err:
950         perf_pmu__parse_cleanup();
951 }
952
953 enum perf_pmu_event_symbol_type
954 perf_pmu__parse_check(const char *name)
955 {
956         struct perf_pmu_event_symbol p, *r;
957
958         /* scan kernel pmu events from sysfs if needed */
959         if (perf_pmu_events_list_num == 0)
960                 perf_pmu__parse_init();
961         /*
962          * The name "cpu" could be a prefix of cpu-cycles or of cpu// events.
963          * cpu-cycles is already handled by the hardcoded tables, so here it
964          * must be a cpu// event, not a kernel PMU event.
965          */
966         if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
967                 return PMU_EVENT_SYMBOL_ERR;
968
969         p.symbol = strdup(name);
970         r = bsearch(&p, perf_pmu_events_list,
971                         (size_t) perf_pmu_events_list_num,
972                         sizeof(struct perf_pmu_event_symbol), comp_pmu);
973         free(p.symbol);
974         return r ? r->type : PMU_EVENT_SYMBOL_ERR;
975 }
976
977 static int parse_events__scanner(const char *str, void *data, int start_token)
978 {
979         YY_BUFFER_STATE buffer;
980         void *scanner;
981         int ret;
982
983         ret = parse_events_lex_init_extra(start_token, &scanner);
984         if (ret)
985                 return ret;
986
987         buffer = parse_events__scan_string(str, scanner);
988
989 #ifdef PARSER_DEBUG
990         parse_events_debug = 1;
991 #endif
992         ret = parse_events_parse(data, scanner);
993
994         parse_events__flush_buffer(buffer, scanner);
995         parse_events__delete_buffer(buffer, scanner);
996         parse_events_lex_destroy(scanner);
997         return ret;
998 }
999
1000 /*
1001  * Parse an event config string and return a list of event terms.
1002  */
1003 int parse_events_terms(struct list_head *terms, const char *str)
1004 {
1005         struct parse_events_terms data = {
1006                 .terms = NULL,
1007         };
1008         int ret;
1009
1010         ret = parse_events__scanner(str, &data, PE_START_TERMS);
1011         if (!ret) {
1012                 list_splice(data.terms, terms);
1013                 zfree(&data.terms);
1014                 return 0;
1015         }
1016
1017         if (data.terms)
1018                 parse_events__free_terms(data.terms);
1019         return ret;
1020 }
1021
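/*
 * Parse the full event specification string from -e.  A few examples of
 * strings this grammar accepts:
 *   "cycles:u"              generic hardware event with a modifier
 *   "cache-misses,faults"   comma separated list of events
 *   "sched:sched_switch"    tracepoint event
 *   "mem:0x1000:rw"         hardware breakpoint
 *   "cpu/config=0x1234/"    raw PMU event built from sysfs format terms
 */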
1022 int parse_events(struct perf_evlist *evlist, const char *str)
1023 {
1024         struct parse_events_evlist data = {
1025                 .list = LIST_HEAD_INIT(data.list),
1026                 .idx  = evlist->nr_entries,
1027         };
1028         int ret;
1029
1030         ret = parse_events__scanner(str, &data, PE_START_EVENTS);
1031         perf_pmu__parse_cleanup();
1032         if (!ret) {
1033                 int entries = data.idx - evlist->nr_entries;
1034                 perf_evlist__splice_list_tail(evlist, &data.list, entries);
1035                 evlist->nr_groups += data.nr_groups;
1036                 return 0;
1037         }
1038
1039         /*
1040          * There are two users, the builtin-record and builtin-test objects.
1041          * Both call perf_evlist__delete() in case of error, so we don't
1042          * need to bother.
1043          */
1044         return ret;
1045 }
1046
1047 int parse_events_option(const struct option *opt, const char *str,
1048                         int unset __maybe_unused)
1049 {
1050         struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
1051         int ret = parse_events(evlist, str);
1052
1053         if (ret) {
1054                 fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
1055                 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
1056         }
1057         return ret;
1058 }
1059
1060 int parse_filter(const struct option *opt, const char *str,
1061                  int unset __maybe_unused)
1062 {
1063         struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
1064         struct perf_evsel *last = NULL;
1065
1066         if (evlist->nr_entries > 0)
1067                 last = perf_evlist__last(evlist);
1068
1069         if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
1070                 fprintf(stderr,
1071                         "--filter option should follow a -e tracepoint option\n");
1072                 return -1;
1073         }
1074
1075         last->filter = strdup(str);
1076         if (last->filter == NULL) {
1077                 fprintf(stderr, "not enough memory to hold filter string\n");
1078                 return -1;
1079         }
1080
1081         return 0;
1082 }
1083
1084 static const char * const event_type_descriptors[] = {
1085         "Hardware event",
1086         "Software event",
1087         "Tracepoint event",
1088         "Hardware cache event",
1089         "Raw hardware event descriptor",
1090         "Hardware breakpoint",
1091 };
1092
1093 static int cmp_string(const void *a, const void *b)
1094 {
1095         const char * const *as = a;
1096         const char * const *bs = b;
1097
1098         return strcmp(*as, *bs);
1099 }
1100
1101 /*
1102  * Print the events from <debugfs_mount_point>/tracing/events
1103  */
1104
1105 void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
1106                              bool name_only)
1107 {
1108         DIR *sys_dir, *evt_dir;
1109         struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
1110         char evt_path[MAXPATHLEN];
1111         char dir_path[MAXPATHLEN];
1112         char **evt_list = NULL;
1113         unsigned int evt_i = 0, evt_num = 0;
1114         bool evt_num_known = false;
1115
1116 restart:
1117         sys_dir = opendir(tracing_events_path);
1118         if (!sys_dir)
1119                 return;
1120
1121         if (evt_num_known) {
1122                 evt_list = zalloc(sizeof(char *) * evt_num);
1123                 if (!evt_list)
1124                         goto out_close_sys_dir;
1125         }
1126
1127         for_each_subsystem(sys_dir, sys_dirent, sys_next) {
1128                 if (subsys_glob != NULL &&
1129                     !strglobmatch(sys_dirent.d_name, subsys_glob))
1130                         continue;
1131
1132                 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
1133                          sys_dirent.d_name);
1134                 evt_dir = opendir(dir_path);
1135                 if (!evt_dir)
1136                         continue;
1137
1138                 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
1139                         if (event_glob != NULL &&
1140                             !strglobmatch(evt_dirent.d_name, event_glob))
1141                                 continue;
1142
1143                         if (!evt_num_known) {
1144                                 evt_num++;
1145                                 continue;
1146                         }
1147
1148                         snprintf(evt_path, MAXPATHLEN, "%s:%s",
1149                                  sys_dirent.d_name, evt_dirent.d_name);
1150
1151                         evt_list[evt_i] = strdup(evt_path);
1152                         if (evt_list[evt_i] == NULL)
1153                                 goto out_close_evt_dir;
1154                         evt_i++;
1155                 }
1156                 closedir(evt_dir);
1157         }
1158         closedir(sys_dir);
1159
1160         if (!evt_num_known) {
1161                 evt_num_known = true;
1162                 goto restart;
1163         }
1164         qsort(evt_list, evt_num, sizeof(char *), cmp_string);
1165         evt_i = 0;
1166         while (evt_i < evt_num) {
1167                 if (name_only) {
1168                         printf("%s ", evt_list[evt_i++]);
1169                         continue;
1170                 }
1171                 printf("  %-50s [%s]\n", evt_list[evt_i++],
1172                                 event_type_descriptors[PERF_TYPE_TRACEPOINT]);
1173         }
1174         if (evt_num)
1175                 printf("\n");
1176
1177 out_free:
1178         evt_num = evt_i;
1179         for (evt_i = 0; evt_i < evt_num; evt_i++)
1180                 zfree(&evt_list[evt_i]);
1181         zfree(&evt_list);
1182         return;
1183
1184 out_close_evt_dir:
1185         closedir(evt_dir);
1186 out_close_sys_dir:
1187         closedir(sys_dir);
1188
1189         printf("FATAL: not enough memory to print %s\n",
1190                         event_type_descriptors[PERF_TYPE_TRACEPOINT]);
1191         if (evt_list)
1192                 goto out_free;
1193 }
1194
1195 /*
1196  * Check whether event is in <debugfs_mount_point>/tracing/events
1197  */
1198
1199 int is_valid_tracepoint(const char *event_string)
1200 {
1201         DIR *sys_dir, *evt_dir;
1202         struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
1203         char evt_path[MAXPATHLEN];
1204         char dir_path[MAXPATHLEN];
1205
1206         sys_dir = opendir(tracing_events_path);
1207         if (!sys_dir)
1208                 return 0;
1209
1210         for_each_subsystem(sys_dir, sys_dirent, sys_next) {
1211
1212                 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
1213                          sys_dirent.d_name);
1214                 evt_dir = opendir(dir_path);
1215                 if (!evt_dir)
1216                         continue;
1217
1218                 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
1219                         snprintf(evt_path, MAXPATHLEN, "%s:%s",
1220                                  sys_dirent.d_name, evt_dirent.d_name);
1221                         if (!strcmp(evt_path, event_string)) {
1222                                 closedir(evt_dir);
1223                                 closedir(sys_dir);
1224                                 return 1;
1225                         }
1226                 }
1227                 closedir(evt_dir);
1228         }
1229         closedir(sys_dir);
1230         return 0;
1231 }
1232
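/*
 * Probe whether the running kernel accepts a given type/config pair by
 * opening a disabled event on the calling process and closing it again.
 */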
1233 static bool is_event_supported(u8 type, unsigned config)
1234 {
1235         bool ret = true;
1236         int open_return;
1237         struct perf_evsel *evsel;
1238         struct perf_event_attr attr = {
1239                 .type = type,
1240                 .config = config,
1241                 .disabled = 1,
1242         };
1243         struct {
1244                 struct thread_map map;
1245                 int threads[1];
1246         } tmap = {
1247                 .map.nr  = 1,
1248                 .threads = { 0 },
1249         };
1250
1251         evsel = perf_evsel__new(&attr);
1252         if (evsel) {
1253                 open_return = perf_evsel__open(evsel, NULL, &tmap.map);
1254                 ret = open_return >= 0;
1255
1256                 if (open_return == -EACCES) {
1257                         /*
1258                          * This happens if the paranoid value
1259                          * /proc/sys/kernel/perf_event_paranoid is set to 2.
1260                          * Re-run with exclude_kernel set; we don't do that
1261                          * by default as some ARM machines do not support it.
1262                          *
1263                          */
1264                         evsel->attr.exclude_kernel = 1;
1265                         ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
1266                 }
1267                 perf_evsel__delete(evsel);
1268         }
1269
1270         return ret;
1271 }
1272
1273 int print_hwcache_events(const char *event_glob, bool name_only)
1274 {
1275         unsigned int type, op, i, evt_i = 0, evt_num = 0;
1276         char name[64];
1277         char **evt_list = NULL;
1278         bool evt_num_known = false;
1279
1280 restart:
1281         if (evt_num_known) {
1282                 evt_list = zalloc(sizeof(char *) * evt_num);
1283                 if (!evt_list)
1284                         goto out_enomem;
1285         }
1286
1287         for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
1288                 for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
1289                         /* skip invalid cache type */
1290                         if (!perf_evsel__is_cache_op_valid(type, op))
1291                                 continue;
1292
1293                         for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
1294                                 __perf_evsel__hw_cache_type_op_res_name(type, op, i,
1295                                                                         name, sizeof(name));
1296                                 if (event_glob != NULL && !strglobmatch(name, event_glob))
1297                                         continue;
1298
1299                                 if (!is_event_supported(PERF_TYPE_HW_CACHE,
1300                                                         type | (op << 8) | (i << 16)))
1301                                         continue;
1302
1303                                 if (!evt_num_known) {
1304                                         evt_num++;
1305                                         continue;
1306                                 }
1307
1308                                 evt_list[evt_i] = strdup(name);
1309                                 if (evt_list[evt_i] == NULL)
1310                                         goto out_enomem;
1311                                 evt_i++;
1312                         }
1313                 }
1314         }
1315
1316         if (!evt_num_known) {
1317                 evt_num_known = true;
1318                 goto restart;
1319         }
1320         qsort(evt_list, evt_num, sizeof(char *), cmp_string);
1321         evt_i = 0;
1322         while (evt_i < evt_num) {
1323                 if (name_only) {
1324                         printf("%s ", evt_list[evt_i++]);
1325                         continue;
1326                 }
1327                 printf("  %-50s [%s]\n", evt_list[evt_i++],
1328                                 event_type_descriptors[PERF_TYPE_HW_CACHE]);
1329         }
1330         if (evt_num)
1331                 printf("\n");
1332
1333 out_free:
1334         evt_num = evt_i;
1335         for (evt_i = 0; evt_i < evt_num; evt_i++)
1336                 zfree(&evt_list[evt_i]);
1337         zfree(&evt_list);
1338         return evt_num;
1339
1340 out_enomem:
1341         printf("FATAL: not enough memory to print %s\n", event_type_descriptors[PERF_TYPE_HW_CACHE]);
1342         if (evt_list)
1343                 goto out_free;
1344         return evt_num;
1345 }
1346
1347 void print_symbol_events(const char *event_glob, unsigned type,
1348                                 struct event_symbol *syms, unsigned max,
1349                                 bool name_only)
1350 {
1351         unsigned int i, evt_i = 0, evt_num = 0;
1352         char name[MAX_NAME_LEN];
1353         char **evt_list = NULL;
1354         bool evt_num_known = false;
1355
1356 restart:
1357         if (evt_num_known) {
1358                 evt_list = zalloc(sizeof(char *) * evt_num);
1359                 if (!evt_list)
1360                         goto out_enomem;
1361                 syms -= max;
1362         }
1363
1364         for (i = 0; i < max; i++, syms++) {
1365
1366                 if (event_glob != NULL &&
1367                     !(strglobmatch(syms->symbol, event_glob) ||
1368                       (syms->alias && strglobmatch(syms->alias, event_glob))))
1369                         continue;
1370
1371                 if (!is_event_supported(type, i))
1372                         continue;
1373
1374                 if (!evt_num_known) {
1375                         evt_num++;
1376                         continue;
1377                 }
1378
1379                 if (!name_only && strlen(syms->alias))
1380                         snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
1381                 else
1382                         strncpy(name, syms->symbol, MAX_NAME_LEN);
1383
1384                 evt_list[evt_i] = strdup(name);
1385                 if (evt_list[evt_i] == NULL)
1386                         goto out_enomem;
1387                 evt_i++;
1388         }
1389
1390         if (!evt_num_known) {
1391                 evt_num_known = true;
1392                 goto restart;
1393         }
1394         qsort(evt_list, evt_num, sizeof(char *), cmp_string);
1395         evt_i = 0;
1396         while (evt_i < evt_num) {
1397                 if (name_only) {
1398                         printf("%s ", evt_list[evt_i++]);
1399                         continue;
1400                 }
1401                 printf("  %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
1402         }
1403         if (evt_num)
1404                 printf("\n");
1405
1406 out_free:
1407         evt_num = evt_i;
1408         for (evt_i = 0; evt_i < evt_num; evt_i++)
1409                 zfree(&evt_list[evt_i]);
1410         zfree(&evt_list);
1411         return;
1412
1413 out_enomem:
1414         printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
1415         if (evt_list)
1416                 goto out_free;
1417 }
1418
1419 /*
1420  * Print the help text for the event symbols:
1421  */
1422 void print_events(const char *event_glob, bool name_only)
1423 {
1424         print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
1425                             event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
1426
1427         print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
1428                             event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
1429
1430         print_hwcache_events(event_glob, name_only);
1431
1432         print_pmu_events(event_glob, name_only);
1433
1434         if (event_glob != NULL)
1435                 return;
1436
1437         if (!name_only) {
1438                 printf("  %-50s [%s]\n",
1439                        "rNNN",
1440                        event_type_descriptors[PERF_TYPE_RAW]);
1441                 printf("  %-50s [%s]\n",
1442                        "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
1443                        event_type_descriptors[PERF_TYPE_RAW]);
1444                 printf("   (see 'man perf-list' on how to encode it)\n");
1445                 printf("\n");
1446
1447                 printf("  %-50s [%s]\n",
1448                        "mem:<addr>[/len][:access]",
1449                         event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1450                 printf("\n");
1451         }
1452
1453         print_tracepoint_events(NULL, NULL, name_only);
1454 }
1455
1456 int parse_events__is_hardcoded_term(struct parse_events_term *term)
1457 {
1458         return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
1459 }
1460
1461 static int new_term(struct parse_events_term **_term, int type_val,
1462                     int type_term, char *config,
1463                     char *str, u64 num)
1464 {
1465         struct parse_events_term *term;
1466
1467         term = zalloc(sizeof(*term));
1468         if (!term)
1469                 return -ENOMEM;
1470
1471         INIT_LIST_HEAD(&term->list);
1472         term->type_val  = type_val;
1473         term->type_term = type_term;
1474         term->config = config;
1475
1476         switch (type_val) {
1477         case PARSE_EVENTS__TERM_TYPE_NUM:
1478                 term->val.num = num;
1479                 break;
1480         case PARSE_EVENTS__TERM_TYPE_STR:
1481                 term->val.str = str;
1482                 break;
1483         default:
1484                 free(term);
1485                 return -EINVAL;
1486         }
1487
1488         *_term = term;
1489         return 0;
1490 }
1491
1492 int parse_events_term__num(struct parse_events_term **term,
1493                            int type_term, char *config, u64 num)
1494 {
1495         return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
1496                         config, NULL, num);
1497 }
1498
1499 int parse_events_term__str(struct parse_events_term **term,
1500                            int type_term, char *config, char *str)
1501 {
1502         return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
1503                         config, str, 0);
1504 }
1505
1506 int parse_events_term__sym_hw(struct parse_events_term **term,
1507                               char *config, unsigned idx)
1508 {
1509         struct event_symbol *sym;
1510
1511         BUG_ON(idx >= PERF_COUNT_HW_MAX);
1512         sym = &event_symbols_hw[idx];
1513
1514         if (config)
1515                 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
1516                                 PARSE_EVENTS__TERM_TYPE_USER, config,
1517                                 (char *) sym->symbol, 0);
1518         else
1519                 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
1520                                 PARSE_EVENTS__TERM_TYPE_USER,
1521                                 (char *) "event", (char *) sym->symbol, 0);
1522 }
1523
1524 int parse_events_term__clone(struct parse_events_term **new,
1525                              struct parse_events_term *term)
1526 {
1527         return new_term(new, term->type_val, term->type_term, term->config,
1528                         term->val.str, term->val.num);
1529 }
1530
1531 void parse_events__free_terms(struct list_head *terms)
1532 {
1533         struct parse_events_term *term, *h;
1534
1535         list_for_each_entry_safe(term, h, terms, list)
1536                 free(term);
1537 }