/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

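/*
 * Platform code registers its SMP operations (CPU bring-up, IPI delivery,
 * etc.) here before any secondary CPUs are started.
 */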
void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

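/*
 * Populate cpu_data for @cpu from the boot CPU's data and record the
 * freshly calibrated loops_per_jiffy value.
 */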
static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}

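/*
 * Called once on the boot CPU before secondaries are brought up; hands
 * off to the platform's prepare_cpus() hook.
 */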
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

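/* Mark the boot CPU online and establish its logical <-> physical mapping. */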
void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
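/*
 * Wait up to one second for the dying CPU to reach CPU_DEAD before
 * reporting it offline.
 */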
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}

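/*
 * Final teardown run on the dying CPU itself: exit the idle task, tear
 * down its IRQ context and advertise CPU_DEAD so native_cpu_die() can
 * see it.
 */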
void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}

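/*
 * Take the calling CPU down: ask the platform to disable it, mark it
 * offline, then migrate its IRQs, stop its local timer and drop its
 * cache/TLB state.
 */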
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop(cpu);

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        clear_tasks_mm_cpumask(cpu);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif

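/*
 * Entry point for secondary CPUs: take a reference on init_mm, set up
 * MMU and trap state, calibrate the delay loop, then mark the CPU
 * online and enter the idle loop.
 */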
asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        atomic_inc(&mm->mm_count);
        atomic_inc(&mm->mm_users);
        current->active_mm = mm;
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        per_cpu_trap_init();

        preempt_disable();

        notify_cpu_starting(cpu);

        local_irq_enable();

        /* Enable local timers */
        local_timer_setup(cpu);
        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_ONLINE);
}

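/*
 * Boot parameters handed to a secondary CPU; the block itself lives in
 * head.S and is filled in by __cpu_up() below.
 */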
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

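/*
 * Bring one secondary CPU up: publish its stack and entry point, kick it
 * via the platform's start_cpu() hook and wait up to a second for it to
 * show up online.
 */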
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

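/* Report the combined BogoMIPS of all online CPUs once bring-up is complete. */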
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

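/*
 * Cross-CPU messaging: each helper below maps a generic SMP request onto
 * the platform's send_ipi() hook with the matching SMP_MSG_* type.
 */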
void smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        local_timer_interrupt();
        irq_exit();
}

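/* Demultiplex an incoming IPI to the matching generic handler. */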
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

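/*
 * Cross-CPU TLB shootdown: the global flush_tlb_*() entry points below run
 * the corresponding local flush on every CPU that may hold stale
 * translations, using the generic smp_call_function() machinery.
 */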
static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

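/*
 * Flush a single (ASID, address) translation on all CPUs; addr1/addr2 of
 * the shared flush_tlb_data carry the ASID and the virtual address.
 */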
static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}