kvmfornfv.git: kernel/arch/mips/kernel/sync-r4k.c (rt linux 4.1.3-rt3 base)
/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next
 * time value. This can cause a small timewarp for CPU0. All other CPUs
 * should not have done anything significant (but they may have had
 * interrupts enabled briefly - prom_smp_finish() should not be responsible
 * for enabling interrupts...)
 */
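
/*
 * Calling context (4.1-era tree): synchronise_count_master() runs on the
 * boot CPU from __cpu_up() and synchronise_count_slave() runs on the
 * incoming CPU from start_secondary(), both in arch/mips/kernel/smp.c,
 * once per CPU brought online.
 */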

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

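/*
 * Shared handshake state between the master and the slave:
 *   count_start_flag  - CPU number of the slave currently being synced,
 *   count_count_start - rendezvous counter for entering a loop pass,
 *   count_count_stop  - rendezvous counter for leaving a loop pass,
 *   count_reference   - master's count value handed to the slave.
 */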
static atomic_t count_start_flag = ATOMIC_INIT(0);
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);
static atomic_t count_reference = ATOMIC_INIT(0);

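/*
 * COUNTON:  cycles ahead of the current count at which the next compare
 *           (timer) interrupt is scheduled after synchronisation.
 * NR_LOOPS: warm-up passes through the rendezvous; only the final pass
 *           actually writes the count registers.
 */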
#define COUNTON 100
#define NR_LOOPS 5

void synchronise_count_master(int cpu)
{
        int i;
        unsigned long flags;
        unsigned int initcount;

        printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

        local_irq_save(flags);

        /*
         * Notify the slave that it's time to start
         */
        atomic_set(&count_reference, read_c0_count());
        atomic_set(&count_start_flag, cpu);
        smp_wmb();

        /* Count will be initialised to current timer for all CPUs */
        initcount = read_c0_count();

        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronised and
         * the master and slaves each set their cycle counters to a known
         * value all at once. This reduces the chance of having random offsets
         * between the processors, and guarantees that the maximum
         * delay between the cycle counters is never bigger than
         * the latency of information-passing (cachelines) between
         * two CPUs.
         */

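        /*
         * Per-iteration handshake (the slave side mirrors this below):
         *   1. the slave bumps count_count_start to 1; the master sees it
         *      and clears count_count_stop for this pass;
         *   2. the master bumps count_count_start to 2, releasing the
         *      slave from its '!= 2' spin;
         *   3. on the final pass both sides write c0_count back to back;
         *   4. the same two-step exchange on count_count_stop ends the pass.
         */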
        for (i = 0; i < NR_LOOPS; i++) {
                /* the slave spins on '!= 2' until we bump the counter */
                while (atomic_read(&count_count_start) != 1)
                        mb();
                atomic_set(&count_count_stop, 0);
                smp_wmb();

                /* this lets the slave write its count register */
                atomic_inc(&count_count_start);

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                /*
                 * Wait for the slave to leave the synchronization point:
                 */
                while (atomic_read(&count_count_stop) != 1)
                        mb();
                atomic_set(&count_count_start, 0);
                smp_wmb();
                atomic_inc(&count_count_stop);
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
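        /* Clear the flag so a later (re-)onlined slave cannot match stale state. */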
        atomic_set(&count_start_flag, 0);

        local_irq_restore(flags);

        /*
         * i386 code reported the skew here, but the
         * count registers were almost certainly out of sync
         * so no point in alarming people
         */
        printk("done.\n");
}

void synchronise_count_slave(int cpu)
{
        int i;
        unsigned int initcount;

        /*
         * Not every cpu is online at the time this gets called,
         * so we first wait for the master to signal that it is this
         * CPU's turn (count_start_flag holds our CPU number).
         */

        while (atomic_read(&count_start_flag) != cpu)
                mb();

        /* Count will be initialised to next expire for all CPUs */
        initcount = atomic_read(&count_reference);

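        /*
         * Mirror image of the master loop above: the slave increments
         * first on both rendezvous counters and the master's matching
         * increment releases it.
         */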
        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&count_count_start);
                while (atomic_read(&count_count_start) != 2)
                        mb();

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                atomic_inc(&count_count_stop);
                while (atomic_read(&count_count_stop) != 2)
                        mb();
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
}
#undef NR_LOOPS