/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	IPI_CPU_BACKTRACE = 15,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
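
/*
 * Illustrative sketch (not part of the original file; the "foo" names
 * are hypothetical): a platform usually hands its operations to the
 * core via its machine descriptor, roughly
 *
 *	static const struct smp_operations foo_smp_ops __initconst = {
 *		.smp_prepare_cpus	= foo_smp_prepare_cpus,
 *		.smp_boot_secondary	= foo_boot_secondary,
 *	};
 *
 *	DT_MACHINE_START(FOO_DT, "Foo board")
 *		.smp = smp_ops(foo_smp_ops),
 *	MACHINE_END
 *
 * and the early setup code then calls smp_set_ops() with the
 * operations selected for the machine.
 */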

static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}
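
/*
 * Note (added, not in the original): with LPAE the page tables may sit
 * above 4GiB, so a PFN is passed instead of a physical address to keep
 * the value within an unsigned long; the secondary boot code is then
 * expected to shift it back into a 64-bit TTBR. Without LPAE the plain
 * physical address already fits.
 */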

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}
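
/*
 * Illustrative sketch (not part of the original file; names are
 * hypothetical): a pen_release-style smp_boot_secondary() on the
 * platform side looks roughly like
 *
 *	static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *	{
 *		unsigned long timeout = jiffies + (1 * HZ);
 *
 *		pen_release = cpu_logical_map(cpu);
 *		sync_cache_w(&pen_release);
 *
 *		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 *
 *		while (time_before(jiffies, timeout)) {
 *			if (pen_release == -1)
 *				return 0;
 *		}
 *		return -ETIMEDOUT;
 *	}
 *
 * with the secondary's boot code writing pen_release back to -1 once
 * it has left the holding pen.
 */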

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}

	clear_tasks_mm_cpumask(cpu);

	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}
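
/*
 * Illustrative note (not in the original): the path into the two
 * functions above is the generic hotplug sequence, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * which runs __cpu_disable() on the dying CPU, lets its idle thread
 * fall into arch_cpu_idle_dead() below, and leaves the requesting CPU
 * waiting here in __cpu_die() for the cpu_died completion.
 */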

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}
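
/*
 * Summary of the boot handshake above (added note, derived from this
 * file): __cpu_up() publishes secondary_data and calls
 * smp_ops.smp_boot_secondary(); the new core enters
 * secondary_start_kernel(), switches off the identity mapping, marks
 * itself online and completes cpu_running, at which point __cpu_up()
 * resumes and clears secondary_data.
 */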

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}
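
/*
 * Illustrative note (not in the original): the interrupt controller
 * driver registers the low-level IPI hook at probe time; the GIC
 * driver, for example, does roughly
 *
 *	set_smp_cross_call(gic_raise_softirq);
 *
 * after which smp_cross_call() below routes every IPI through it.
 */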

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
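
/*
 * Illustrative note (not in the original): these hooks back the
 * generic cross-call API, so e.g.
 *
 *	smp_call_function_single(1, some_fn, some_arg, 1);
 *
 * (some_fn/some_arg hypothetical) ends up in
 * arch_send_call_function_single_ipi(1), and CPU1 runs some_fn() from
 * its IPI_CALL_FUNC_SINGLE handler in handle_IPI() below.
 */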

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif
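
/*
 * Illustrative note (not in the original): the generic clockevents
 * core calls tick_broadcast() when CPUs whose local timer stops in
 * deep idle need a tick; each CPU in the mask then handles IPI_TIMER
 * via tick_receive_broadcast() in handle_IPI() below.
 */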

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}
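
/*
 * Illustrative usage sketch (not in the original; the delivery step is
 * schematic): a caller that needs a specific CPU to signal it from IPI
 * context, as the big.LITTLE switcher does, can do roughly
 *
 *	struct completion done;
 *	int ipi_nr;
 *
 *	init_completion(&done);
 *	ipi_nr = register_ipi_completion(&done, cpu);
 *	... arrange for 'cpu' to receive IPI number 'ipi_nr' ...
 *	wait_for_completion(&done);
 */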

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}
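
/*
 * Illustrative note (not in the original): cpufreq_scale() rescales the
 * delay calibration linearly with frequency,
 *
 *	lpj_new = lpj_ref * freq_new / freq_ref
 *
 * e.g. a CPU calibrated to lpj_ref = 2500000 at 500000 kHz gets
 * lpj = 5000000 when it ramps to 1000000 kHz (made-up numbers).
 */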

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

static void raise_nmi(cpumask_t *mask)
{
	/*
	 * Generate the backtrace directly if we are running in a calling
	 * context that is not preemptible by the backtrace IPI. Note
	 * that nmi_cpu_backtrace() automatically removes the current cpu
	 * from mask.
	 */
	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
		nmi_cpu_backtrace(NULL);

	smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
}
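
/*
 * Illustrative note (not in the original): this is the arch hook behind
 * the magic SysRq 'l' ("show backtrace of all active CPUs"), e.g.
 *
 *	echo l > /proc/sysrq-trigger
 *
 * which backtraces every online CPU via IPI_CPU_BACKTRACE, with
 * raise_nmi() handling the calling CPU directly when needed.
 */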