/*
 * Copyright 2014, Michael Ellerman, IBM Corp.
 * Licensed under GPLv2.
 */

/*
 * Test that tries to trigger CPU_FTR_PMAO_BUG. Which is a hardware defect
 * where an exception triggers but we context switch before it is delivered and
 * so we lose the exception.
 */
21 static int test_body(void)
23 int i, orig_period, max_period;
26 /* We use PMC4 to make sure the kernel switches all counters correctly */
27 event_init_named(&event, 0x40002, "instructions");
28 event_leader_ebb_init(&event);
30 event.attr.exclude_kernel = 1;
31 event.attr.exclude_hv = 1;
32 event.attr.exclude_idle = 1;
34 FAIL_IF(event_open(&event));
36 ebb_enable_pmc_counting(4);
37 setup_ebb_handler(standard_ebb_callee);
39 FAIL_IF(ebb_event_enable(&event));
42 * We want a low sample period, but we also want to get out of the EBB
43 * handler without tripping up again.
45 * This value picked after much experimentation.
47 orig_period = max_period = sample_period = 400;
49 mtspr(SPRN_PMC4, pmc_sample_period(sample_period));
51 while (ebb_state.stats.ebb_count < 1000000) {
53 * We are trying to get the EBB exception to race exactly with
54 * us entering the kernel to do the syscall. We then need the
55 * kernel to decide our timeslice is up and context switch to
56 * the other thread. When we come back our EBB will have been
57 * lost and we'll spin in this while loop forever.
60 for (i = 0; i < 100000; i++)
63 /* Change the sample period slightly to try and hit the race */
64 if (sample_period >= (orig_period + 200))
65 sample_period = orig_period;
69 if (sample_period > max_period)
70 max_period = sample_period;
76 count_pmc(4, sample_period);
77 mtspr(SPRN_PMC4, 0xdead);
79 dump_summary_ebb_state();
84 FAIL_IF(ebb_state.stats.ebb_count == 0);
86 /* We vary our sample period so we need extra fudge here */
87 FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period)));
92 static int lost_exception(void)
94 return eat_cpu(test_body);
99 return test_harness(lost_exception, "lost_exception");