/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "smpboot.h"
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
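/*
 * Illustrative sketch (not part of this file): the intended caller-side
 * pairing for the __register_cpu_notifier() variant documented above is
 * roughly the following (my_nb and setup_per_cpu_state() are hypothetical):
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		setup_per_cpu_state(cpu);
 *	__register_cpu_notifier(&my_nb);
 *	cpu_notifier_register_done();
 *
 * which keeps the initial per-CPU setup and the callback registration
 * atomic with respect to concurrent CPU hotplug.
 */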
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
/**
 * hotplug_pcp - per cpu hotplug descriptor
 * @unplug:	set when pin_current_cpu() needs to sync tasks
 * @sync_tsk:	the task that waits for tasks to finish pinned sections
 * @refcount:	counter of tasks in pinned sections
 * @grab_lock:	set when the tasks entering pinned sections should wait
 * @synced:	notifier for @sync_tsk to tell cpu_down it's finished
 * @mutex:	the mutex to make tasks wait (used when @grab_lock is true)
 * @mutex_init:	zero if the mutex hasn't been initialized yet.
 *
 * Although @unplug and @sync_tsk may point to the same task, the @unplug
 * is used as a flag and still exists after @sync_tsk has exited and
 * @sync_tsk set to NULL.
 */
struct hotplug_pcp {
	struct task_struct *unplug;
	struct task_struct *sync_tsk;
	int refcount;
	int grab_lock;
	struct completion synced;
	struct completion unplug_wait;
#ifdef CONFIG_PREEMPT_RT_FULL
	/*
	 * Note, on PREEMPT_RT, the hotplug lock must save the state of
	 * the task, otherwise the mutex will cause the task to fail
	 * to sleep when required. (Because it's called from migrate_disable())
	 *
	 * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
	 * state, so it can be used here.
	 */
	spinlock_t lock;
#else
	struct mutex mutex;
#endif
	int mutex_init;
};

#ifdef CONFIG_PREEMPT_RT_FULL
# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
#else
# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
#endif

static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
{
	struct hotplug_pcp *hp;
	int force = 0;

retry:
	hp = this_cpu_ptr(&hotplug_pcp);
	if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
	    hp->unplug == current) {
		hp->refcount++;
		return;
	}
	if (hp->grab_lock) {
		preempt_enable();
		hotplug_lock(hp);
		hotplug_unlock(hp);
	} else {
		preempt_enable();
		/*
		 * Try to push this task off of this CPU.
		 */
		if (!migrate_me()) {
			preempt_disable();
			hp = this_cpu_ptr(&hotplug_pcp);
			if (!hp->grab_lock) {
				/*
				 * Just let it continue; it's already pinned
				 * or about to sleep.
				 */
				force = 1;
				goto retry;
			}
			preempt_enable();
		}
	}
	preempt_disable();
	goto retry;
}

/**
 * unpin_current_cpu - Allow unplug of current cpu
 *
 * Must be called with preemption or interrupts disabled!
 */
void unpin_current_cpu(void)
{
	struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);

	WARN_ON(hp->refcount <= 0);

	/* This is safe. sync_unplug_thread is pinned to this cpu */
	if (!--hp->refcount && hp->unplug && hp->unplug != current)
		wake_up_process(hp->unplug);
}
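/*
 * Illustrative sketch (not part of this file): these helpers are meant to
 * bracket a migration-disabled region, roughly
 *
 *	preempt_disable();
 *	pin_current_cpu();	// this CPU can no longer be unplugged
 *	...			// per-CPU work
 *	unpin_current_cpu();	// drop the pin, possibly waking the unplug thread
 *	preempt_enable();
 *
 * On PREEMPT_RT the real callers are migrate_disable()/migrate_enable();
 * the sequence above only shows the required pairing and preemption context.
 */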
static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (hp->refcount) {
		schedule_preempt_disabled();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
}

static int sync_unplug_thread(void *data)
{
	struct hotplug_pcp *hp = data;

	wait_for_completion(&hp->unplug_wait);
	preempt_disable();
	hp->unplug = current;
	wait_for_pinned_cpus(hp);

	/*
	 * This thread will synchronize the cpu_down() with threads
	 * that have pinned the CPU. When the pinned CPU count reaches
	 * zero, we inform the cpu_down code to continue to the next step.
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	preempt_enable();
	complete(&hp->synced);

	/*
	 * If all succeeds, the next step will need tasks to wait till
	 * the CPU is offline before continuing. To do this, the grab_lock
	 * is set and tasks going into pin_current_cpu() will block on the
	 * mutex. But we still need to wait for those that are already in
	 * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
	 * will kick this thread out.
	 */
	while (!hp->grab_lock && !kthread_should_stop()) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	/* Make sure grab_lock is seen before we see a stale completion */
	smp_mb();

	/*
	 * Now just before cpu_down() enters stop machine, we need to make
	 * sure all tasks that are in pinned CPU sections are out, and new
	 * tasks will now grab the lock, keeping them from entering pinned
	 * CPU sections.
	 */
	if (!kthread_should_stop()) {
		preempt_disable();
		wait_for_pinned_cpus(hp);
		preempt_enable();
		complete(&hp->synced);
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);

	/*
	 * Force this thread off this CPU as it's going down and
	 * we don't want any more work on this CPU.
	 */
	current->flags &= ~PF_NO_SETAFFINITY;
	set_cpus_allowed_ptr(current, cpu_present_mask);
	migrate_me();
	return 0;
}
static void __cpu_unplug_sync(struct hotplug_pcp *hp)
{
	wake_up_process(hp->sync_tsk);
	wait_for_completion(&hp->synced);
}

static void __cpu_unplug_wait(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	complete(&hp->unplug_wait);
	wait_for_completion(&hp->synced);
}

/*
 * Start the sync_unplug_thread on the target cpu and wait for it to
 * complete.
 */
static int cpu_unplug_begin(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
	int err;

	/* Protected by cpu_hotplug.lock */
	if (!hp->mutex_init) {
#ifdef CONFIG_PREEMPT_RT_FULL
		spin_lock_init(&hp->lock);
#else
		mutex_init(&hp->mutex);
#endif
		hp->mutex_init = 1;
	}

	/* Inform the scheduler to migrate tasks off this CPU */
	tell_sched_cpu_down_begin(cpu);

	init_completion(&hp->synced);
	init_completion(&hp->unplug_wait);

	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
	if (IS_ERR(hp->sync_tsk)) {
		err = PTR_ERR(hp->sync_tsk);
		hp->sync_tsk = NULL;
		return err;
	}
	kthread_bind(hp->sync_tsk, cpu);

	/*
	 * Wait for tasks to get out of the pinned sections,
	 * it's still OK if new tasks enter. Some CPU notifiers will
	 * wait for tasks that are going to enter these sections and
	 * we must not have them block.
	 */
	wake_up_process(hp->sync_tsk);
	return 0;
}
static void cpu_unplug_sync(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	init_completion(&hp->synced);
	/* The completion needs to be initialized before setting grab_lock */
	smp_wmb();

	/* Grab the mutex before setting grab_lock */
	hotplug_lock(hp);
	hp->grab_lock = 1;

	/*
	 * The CPU notifiers have been completed.
	 * Wait for tasks to get out of pinned CPU sections and have new
	 * tasks block until the CPU is completely down.
	 */
	__cpu_unplug_sync(hp);

	/* All done with the sync thread */
	kthread_stop(hp->sync_tsk);
	hp->sync_tsk = NULL;
}

static void cpu_unplug_done(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	hp->unplug = NULL;
	/* Let all tasks know cpu unplug is finished before cleaning up */
	smp_wmb();

	if (hp->sync_tsk)
		kthread_stop(hp->sync_tsk);

	if (hp->grab_lock) {
		hotplug_unlock(hp);
		/* protected by cpu_hotplug.lock */
		hp->grab_lock = 0;
	}
	tell_sched_cpu_down_done(cpu);
}
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
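/*
 * Illustrative sketch (not part of this file): code that needs a stable
 * cpu_online_mask takes the reader side around the traversal, e.g.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_setup(cpu);	// hypothetical per-CPU work
 *	put_online_cpus();
 *
 * While the reader count is non-zero, cpu_hotplug_begin() (the writer side
 * used by cpu_up()/cpu_down()) sleeps on cpu_hotplug.wq.
 */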
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
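/*
 * Illustrative sketch (not part of this file): a minimal hotplug notifier as
 * a client would register it (the names below are hypothetical):
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			// bring up per-CPU state for 'cpu'
 *			break;
 *		case CPU_DEAD:
 *			// tear down per-CPU state for 'cpu'
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_nb = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *	...
 *	register_cpu_notifier(&my_cpu_nb);
 */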
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so we do not warn about a task
		 * that was running on this cpu in the past and has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int mycpu, err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	cpumask_var_t cpumask;
	cpumask_var_t cpumask_org;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* Move the downtaker off the unplug cpu */
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
		free_cpumask_var(cpumask);
		return -ENOMEM;
	}

	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);
	migrate_disable();
	mycpu = smp_processor_id();
	if (mycpu == cpu) {
		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
		migrate_enable();
		err = -EBUSY;
		goto restore_cpus;
	}
	migrate_enable();

	cpu_hotplug_begin();
	err = cpu_unplug_begin(cpu);
	if (err) {
		printk("cpu_unplug_begin(%d) failed\n", cpu);
		goto out_cancel;
	}

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	__cpu_unplug_wait(cpu);
	smpboot_park_threads(cpu);

	/* Notifiers are done. Don't let any more tasks pin this CPU. */
	cpu_unplug_sync(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone. Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_unplug_done(cpu);
out_cancel:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
restore_cpus:
	set_cpus_allowed_ptr(current, cpumask_org);
	free_cpumask_var(cpumask_org);
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	err = _cpu_down(cpu, 0);
out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void __cpuinit smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	return ret;
}
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
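/*
 * Illustrative worked example (not part of this file): with BITS_PER_LONG
 * == 64, MASK_DECLARE_1(5) expands to [6][0] = (1UL << 5), so row 6 of
 * cpu_bit_bitmap is a bitmap whose only set bit is bit 5 and cpumask_of(5)
 * can simply point at that row. For cpu >= BITS_PER_LONG, get_cpu_mask()
 * backs the pointer up by cpu/BITS_PER_LONG words into the preceding rows,
 * whose trailing words are all zero; the all-zero row 0 exists so that this
 * backed-up pointer still lands in valid, zeroed storage.
 */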
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}