These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/arch/mips/kernel/smp.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

cpumask_t cpu_callin_map;               /* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

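/*
 * Record @cpu in cpu_sibling_setup_map and mark every CPU already in
 * that mask which shares @cpu's package and core as a sibling, in both
 * directions.  With a single TC per core, a CPU is its own sibling.
 */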
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpu_data[cpu].package == cpu_data[i].package &&
                                    cpu_data[cpu].core == cpu_data[i].core) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

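/*
 * Mark every already-set-up CPU that shares @cpu's package as a member
 * of @cpu's core map, and vice versa.
 */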
static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
static inline void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpu_data[i].package == cpu_data[k].package &&
                            cpu_data[i].core == cpu_data[k].core)
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
}

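/* Platform-specific SMP operations, installed via register_smp_ops() */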
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
        maar_init();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

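        /* Calibrate loops_per_jiffy so udelay() works on this CPU */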
        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        calculate_cpu_foreign_map();

        cpumask_set_cpu(cpu, &cpu_callin_map);

        synchronise_count_slave(cpu);

        /*
         * IRQs will be enabled in ->smp_finish(); enabling them too
         * early is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_ONLINE);
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU. Be a bit slow here and
         * set the bits for every online CPU so we don't miss
         * any IPI whilst taking this VPE down.
         */

        cpumask_copy(&cpu_foreign_map, cpu_online_mask);

        /* Make it visible to every other CPU */
        smp_mb();

        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
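        /* Without CPU hotplug, treat every possible CPU as present */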
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        cpumask_set_cpu(0, &cpu_callin_map);
}

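/*
 * Boot a secondary CPU and busy-wait until it signals its arrival
 * through cpu_callin_map, then synchronise the count registers.
 */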
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        mp_ops->boot_secondary(cpu, tidle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
                udelay(100);
                schedule();
        }

        synchronise_count_master(cpu);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

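/* Run @func on every CPU: on the others via IPI, then locally */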
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
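                /*
                 * Only the current task uses this mm: clear its context
                 * on all other CPUs so they allocate a fresh ASID at
                 * the next switch_mm() instead of being IPI'd now.
                 */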
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
                }
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
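/* Point all other online CPUs at @dump_ipi_callback and kick them
 * with an SMP_DUMP IPI (used on the kexec crash-dump path). */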
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
        int i;
        int cpu = smp_processor_id();

        dump_ipi_function_ptr = dump_ipi_callback;
        smp_mb();
        for_each_online_cpu(i)
                if (i != cpu)
                        mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

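/*
 * Deliver a tick broadcast to each CPU in @mask.  The per-cpu count
 * ensures at most one async IPI is in flight per target CPU; the
 * callee resets it once the broadcast has been received.
 */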
void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        struct call_single_data *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

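/* IPI handler: receive the broadcast and allow the next one to be sent */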
static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
        struct call_single_data *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */