/*
 * Copyright 2014, Michael Ellerman, IBM Corp.
 * Licensed under GPLv2.
 */
6 #define _GNU_SOURCE /* For CPU_ZERO etc. */
15 #include <sys/ioctl.h>
22 void (*ebb_user_func)(void);
30 struct ebb_state ebb_state;
32 u64 sample_period = 0x40000000ull;
34 void reset_ebb_with_clear_mask(unsigned long mmcr0_clear_mask)
38 /* 2) clear MMCR0[PMAO] - docs say BESCR[PMEO] should do this */
39 /* 3) set MMCR0[PMAE] - docs say BESCR[PME] should do this */
40 val = mfspr(SPRN_MMCR0);
41 mtspr(SPRN_MMCR0, (val & ~mmcr0_clear_mask) | MMCR0_PMAE);
43 /* 4) clear BESCR[PMEO] */
44 mtspr(SPRN_BESCRR, BESCR_PMEO);
46 /* 5) set BESCR[PME] */
47 mtspr(SPRN_BESCRS, BESCR_PME);
49 /* 6) rfebb 1 - done in our caller */
54 reset_ebb_with_clear_mask(MMCR0_PMAO | MMCR0_FC);
57 /* Called outside of the EBB handler to check MMCR0 is sane */
58 int ebb_check_mmcr0(void)
62 val = mfspr(SPRN_MMCR0);
63 if ((val & (MMCR0_FC | MMCR0_PMAO)) == MMCR0_FC) {
64 /* It's OK if we see FC & PMAO, but not FC by itself */
65 printf("Outside of loop, only FC set 0x%llx\n", val);
72 bool ebb_check_count(int pmc, u64 sample_period, int fudge)
74 u64 count, upper, lower;
76 count = ebb_state.stats.pmc_count[PMC_INDEX(pmc)];
78 lower = ebb_state.stats.ebb_count * (sample_period - fudge);
81 printf("PMC%d count (0x%llx) below lower limit 0x%llx (-0x%llx)\n",
82 pmc, count, lower, lower - count);
86 upper = ebb_state.stats.ebb_count * (sample_period + fudge);
89 printf("PMC%d count (0x%llx) above upper limit 0x%llx (+0x%llx)\n",
90 pmc, count, upper, count - upper);
94 printf("PMC%d count (0x%llx) is between 0x%llx and 0x%llx delta +0x%llx/-0x%llx\n",
95 pmc, count, lower, upper, count - lower, upper - count);
100 void standard_ebb_callee(void)
105 val = mfspr(SPRN_BESCR);
106 if (!(val & BESCR_PMEO)) {
107 ebb_state.stats.spurious++;
111 ebb_state.stats.ebb_count++;
112 trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
114 val = mfspr(SPRN_MMCR0);
115 trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
118 for (i = 1; i <= 6; i++) {
119 if (ebb_state.pmc_enable[PMC_INDEX(i)])
120 found += count_pmc(i, sample_period);
124 ebb_state.stats.no_overflow++;
130 extern void ebb_handler(void);
132 void setup_ebb_handler(void (*callee)(void))
136 #if defined(_CALL_ELF) && _CALL_ELF == 2
137 entry = (u64)ebb_handler;
145 opd = (struct opd *)ebb_handler;
148 printf("EBB Handler is at %#llx\n", entry);
150 ebb_user_func = callee;
152 /* Ensure ebb_user_func is set before we set the handler */
154 mtspr(SPRN_EBBHR, entry);
156 /* Make sure the handler is set before we return */
160 void clear_ebb_stats(void)
162 memset(&ebb_state.stats, 0, sizeof(ebb_state.stats));
165 void dump_summary_ebb_state(void)
167 printf("ebb_state:\n" \
168 " ebb_count = %d\n" \
171 " no_overflow = %d\n" \
172 " pmc[1] count = 0x%llx\n" \
173 " pmc[2] count = 0x%llx\n" \
174 " pmc[3] count = 0x%llx\n" \
175 " pmc[4] count = 0x%llx\n" \
176 " pmc[5] count = 0x%llx\n" \
177 " pmc[6] count = 0x%llx\n",
178 ebb_state.stats.ebb_count, ebb_state.stats.spurious,
179 ebb_state.stats.negative, ebb_state.stats.no_overflow,
180 ebb_state.stats.pmc_count[0], ebb_state.stats.pmc_count[1],
181 ebb_state.stats.pmc_count[2], ebb_state.stats.pmc_count[3],
182 ebb_state.stats.pmc_count[4], ebb_state.stats.pmc_count[5]);
185 static char *decode_mmcr0(u32 value)
191 if (value & (1 << 31))
193 if (value & (1 << 26))
194 strcat(buf, "PMAE ");
195 if (value & (1 << 7))
196 strcat(buf, "PMAO ");
201 static char *decode_bescr(u64 value)
207 if (value & (1ull << 63))
209 if (value & (1ull << 32))
210 strcat(buf, "PMAE ");
212 strcat(buf, "PMAO ");
217 void dump_ebb_hw_state(void)
222 mmcr0 = mfspr(SPRN_MMCR0);
223 bescr = mfspr(SPRN_BESCR);
225 printf("HW state:\n" \
226 "MMCR0 0x%016x %s\n" \
229 "BESCR 0x%016llx %s\n" \
237 mmcr0, decode_mmcr0(mmcr0), mfspr(SPRN_MMCR2),
238 mfspr(SPRN_EBBHR), bescr, decode_bescr(bescr),
239 mfspr(SPRN_PMC1), mfspr(SPRN_PMC2), mfspr(SPRN_PMC3),
240 mfspr(SPRN_PMC4), mfspr(SPRN_PMC5), mfspr(SPRN_PMC6),
244 void dump_ebb_state(void)
246 dump_summary_ebb_state();
250 trace_buffer_print(ebb_state.trace);
253 int count_pmc(int pmc, uint32_t sample_period)
255 uint32_t start_value;
259 start_value = pmc_sample_period(sample_period);
262 if (val < start_value)
263 ebb_state.stats.negative++;
265 ebb_state.stats.pmc_count[PMC_INDEX(pmc)] += val - start_value;
267 trace_log_reg(ebb_state.trace, SPRN_PMC1 + pmc - 1, val);
270 write_pmc(pmc, start_value);
272 /* Report if we overflowed */
273 return val >= COUNTER_OVERFLOW;
276 int ebb_event_enable(struct event *e)
280 /* Ensure any SPR writes are ordered vs us */
283 rc = ioctl(e->fd, PERF_EVENT_IOC_ENABLE);
295 void ebb_freeze_pmcs(void)
297 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
301 void ebb_unfreeze_pmcs(void)
303 /* Unfreeze counters */
304 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
308 void ebb_global_enable(void)
310 /* Enable EBBs globally and PMU EBBs */
311 mtspr(SPRN_BESCR, 0x8000000100000000ull);
315 void ebb_global_disable(void)
317 /* Disable EBBs & freeze counters, events are still scheduled */
318 mtspr(SPRN_BESCRR, BESCR_PME);
322 void event_ebb_init(struct event *e)
324 e->attr.config |= (1ull << 63);
327 void event_bhrb_init(struct event *e, unsigned ifm)
329 e->attr.config |= (1ull << 62) | ((u64)ifm << 60);
332 void event_leader_ebb_init(struct event *e)
336 e->attr.exclusive = 1;
340 int ebb_child(union pipe read_pipe, union pipe write_pipe)
345 FAIL_IF(wait_for_parent(read_pipe));
347 event_init_named(&event, 0x1001e, "cycles");
348 event_leader_ebb_init(&event);
350 event.attr.exclude_kernel = 1;
351 event.attr.exclude_hv = 1;
352 event.attr.exclude_idle = 1;
354 FAIL_IF(event_open(&event));
356 ebb_enable_pmc_counting(1);
357 setup_ebb_handler(standard_ebb_callee);
360 FAIL_IF(event_enable(&event));
362 if (event_read(&event)) {
364 * Some tests expect to fail here, so don't report an error on
365 * this line, and return a distinguisable error code. Tell the
366 * parent an error happened.
368 notify_parent_of_error(write_pipe);
372 mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
374 FAIL_IF(notify_parent(write_pipe));
375 FAIL_IF(wait_for_parent(read_pipe));
376 FAIL_IF(notify_parent(write_pipe));
378 while (ebb_state.stats.ebb_count < 20) {
379 FAIL_IF(core_busy_loop());
381 /* To try and hit SIGILL case */
382 val = mfspr(SPRN_MMCRA);
383 val |= mfspr(SPRN_MMCR2);
384 val |= mfspr(SPRN_MMCR0);
387 ebb_global_disable();
390 count_pmc(1, sample_period);
396 FAIL_IF(ebb_state.stats.ebb_count == 0);
401 static jmp_buf setjmp_env;
403 static void sigill_handler(int signal)
405 printf("Took sigill\n");
406 longjmp(setjmp_env, 1);
409 static struct sigaction sigill_action = {
410 .sa_handler = sigill_handler,
413 int catch_sigill(void (*func)(void))
415 if (sigaction(SIGILL, &sigill_action, NULL)) {
420 if (setjmp(setjmp_env) == 0) {
428 void write_pmc1(void)
433 void write_pmc(int pmc, u64 value)
436 case 1: mtspr(SPRN_PMC1, value); break;
437 case 2: mtspr(SPRN_PMC2, value); break;
438 case 3: mtspr(SPRN_PMC3, value); break;
439 case 4: mtspr(SPRN_PMC4, value); break;
440 case 5: mtspr(SPRN_PMC5, value); break;
441 case 6: mtspr(SPRN_PMC6, value); break;
445 u64 read_pmc(int pmc)
448 case 1: return mfspr(SPRN_PMC1);
449 case 2: return mfspr(SPRN_PMC2);
450 case 3: return mfspr(SPRN_PMC3);
451 case 4: return mfspr(SPRN_PMC4);
452 case 5: return mfspr(SPRN_PMC5);
453 case 6: return mfspr(SPRN_PMC6);
459 static void term_handler(int signal)
461 dump_summary_ebb_state();
466 struct sigaction term_action = {
467 .sa_handler = term_handler,
470 static void __attribute__((constructor)) ebb_init(void)
474 if (sigaction(SIGTERM, &term_action, NULL))
477 ebb_state.trace = trace_buffer_allocate(1 * 1024 * 1024);