/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
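/*
 * Example (illustrative sketch, not part of the original file): how a client
 * subsystem is expected to combine cpu_notifier_register_begin()/done() with
 * __register_cpu_notifier() so that no CPU can come or go between seeding the
 * already-online CPUs and registering the callback.  The example_* names are
 * hypothetical.
 */
static int example_cpu_online_cb(struct notifier_block *nb,
                                 unsigned long action, void *hcpu)
{
        if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
                pr_info("example: cpu%lu came online\n", (unsigned long)hcpu);
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_online_nb = {
        .notifier_call = example_cpu_online_cb,
};

static void __init __maybe_unused example_register_cpu_notifier(void)
{
        unsigned int cpu;

        cpu_notifier_register_begin();

        /* Set up state for the CPUs that are already online ... */
        for_each_online_cpu(cpu)
                pr_info("example: cpu%u already online\n", cpu);

        /*
         * ... then register with the double-underscore variant; the
         * begin/done pair above already serializes against CPU hotplug.
         */
        __register_cpu_notifier(&example_cpu_online_nb);

        cpu_notifier_register_done();
}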
static RAW_NOTIFIER_HEAD(cpu_chain);
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
/**
 * hotplug_pcp - per cpu hotplug descriptor
 * @unplug: set when pin_current_cpu() needs to sync tasks
 * @sync_tsk: the task that waits for tasks to finish pinned sections
 * @refcount: counter of tasks in pinned sections
 * @grab_lock: set when the tasks entering pinned sections should wait
 * @synced: notifier for @sync_tsk to tell cpu_down it's finished
 * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
 * @mutex_init: zero if the mutex hasn't been initialized yet.
 *
 * Although @unplug and @sync_tsk may point to the same task, the @unplug
 * is used as a flag and still exists after @sync_tsk has exited and
 * @sync_tsk set to NULL.
 */
struct hotplug_pcp {
        struct task_struct *unplug;
        struct task_struct *sync_tsk;
        int refcount;
        int grab_lock;
        struct completion synced;
        struct completion unplug_wait;
#ifdef CONFIG_PREEMPT_RT_FULL
        /*
         * Note, on PREEMPT_RT, the hotplug lock must save the state of
         * the task, otherwise the mutex will cause the task to fail
         * to sleep when required. (Because it's called from migrate_disable())
         *
         * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
         * state, so it is used here instead of a mutex.
         */
        spinlock_t lock;
#else
        struct mutex mutex;
#endif
        int mutex_init;
};

#ifdef CONFIG_PREEMPT_RT_FULL
# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
#else
# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
#endif

static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
{
        struct hotplug_pcp *hp;
        int force = 0;

retry:
        hp = this_cpu_ptr(&hotplug_pcp);

        if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
            hp->unplug == current) {
                hp->refcount++;
                return;
        }
        if (hp->grab_lock) {
                preempt_enable();
                hotplug_lock(hp);
                hotplug_unlock(hp);
        } else {
                preempt_enable();
                /*
                 * Try to push this task off of this CPU.
                 */
                if (!migrate_me()) {
                        preempt_disable();
                        hp = this_cpu_ptr(&hotplug_pcp);
                        if (!hp->grab_lock) {
                                /*
                                 * Just let it continue, it's already pinned
                                 * or about to sleep.
                                 */
                                force = 1;
                                goto retry;
                        }
                        preempt_enable();
                }
        }
        preempt_disable();
        goto retry;
}

/**
 * unpin_current_cpu - Allow unplug of current cpu
 *
 * Must be called with preemption or interrupts disabled!
 */
void unpin_current_cpu(void)
{
        struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);

        WARN_ON(hp->refcount <= 0);

        /* This is safe. sync_unplug_thread is pinned to this cpu */
        if (!--hp->refcount && hp->unplug && hp->unplug != current)
                wake_up_process(hp->unplug);
}

static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (hp->refcount) {
                schedule_preempt_disabled();
                set_current_state(TASK_UNINTERRUPTIBLE);
        }
}

static int sync_unplug_thread(void *data)
{
        struct hotplug_pcp *hp = data;

        wait_for_completion(&hp->unplug_wait);
        preempt_disable();
        hp->unplug = current;
        wait_for_pinned_cpus(hp);

        /*
         * This thread will synchronize the cpu_down() with threads
         * that have pinned the CPU. When the pinned CPU count reaches
         * zero, we inform the cpu_down code to continue to the next step.
         */
        set_current_state(TASK_UNINTERRUPTIBLE);
        preempt_enable();
        complete(&hp->synced);

        /*
         * If all succeeds, the next step will need tasks to wait till
         * the CPU is offline before continuing. To do this, the grab_lock
         * is set and tasks going into pin_current_cpu() will block on the
         * mutex. But we still need to wait for those that are already in
         * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
         * will kick this thread out.
         */
        while (!hp->grab_lock && !kthread_should_stop()) {
                schedule();
                set_current_state(TASK_UNINTERRUPTIBLE);
        }

        /* Make sure grab_lock is seen before we see a stale completion */
        smp_mb();

        /*
         * Now just before cpu_down() enters stop machine, we need to make
         * sure all tasks that are in pinned CPU sections are out, and new
         * tasks will now grab the lock, keeping them from entering pinned
         * CPU sections.
         */
        if (!kthread_should_stop()) {
                preempt_disable();
                wait_for_pinned_cpus(hp);
                preempt_enable();
                complete(&hp->synced);
        }

        set_current_state(TASK_UNINTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_UNINTERRUPTIBLE);
        }
        set_current_state(TASK_RUNNING);

        /*
         * Force this thread off this CPU as it's going down and
         * we don't want any more work on this CPU.
         */
        current->flags &= ~PF_NO_SETAFFINITY;
        set_cpus_allowed_ptr(current, cpu_present_mask);
        return 0;
}
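/*
 * Example (illustrative sketch, not part of the original file): the intended
 * pairing of pin_current_cpu()/unpin_current_cpu(), roughly the shape of what
 * migrate_disable()/migrate_enable() do on PREEMPT_RT.  Note that the pin only
 * keeps this CPU from being unplugged; something else (the task's
 * migrate-disabled state in the scheduler) must keep the task itself on the
 * CPU.  The example_* name is hypothetical.
 */
static void __maybe_unused example_migrate_disable_pairing(void)
{
        /* enter: pin_current_cpu() wants preempt_count == 1 */
        preempt_disable();
        pin_current_cpu();              /* CPU may no longer be unplugged */
        /* ... mark the task migrate-disabled here ... */
        preempt_enable();

        /* migration-disabled, possibly sleeping, work runs here */

        /* exit: mirror image of the above */
        preempt_disable();
        /* ... clear the task's migrate-disabled state here ... */
        unpin_current_cpu();            /* allow unplug of this CPU again */
        preempt_enable();
}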
static void __cpu_unplug_sync(struct hotplug_pcp *hp)
{
        wake_up_process(hp->sync_tsk);
        wait_for_completion(&hp->synced);
}

static void __cpu_unplug_wait(unsigned int cpu)
{
        struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

        complete(&hp->unplug_wait);
        wait_for_completion(&hp->synced);
}
/*
 * Start the sync_unplug_thread on the target cpu and wait for it to
 * complete.
 */
static int cpu_unplug_begin(unsigned int cpu)
{
        struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
        int err;

        /* Protected by cpu_hotplug.lock */
        if (!hp->mutex_init) {
#ifdef CONFIG_PREEMPT_RT_FULL
                spin_lock_init(&hp->lock);
#else
                mutex_init(&hp->mutex);
#endif
                hp->mutex_init = 1;
        }

        /* Inform the scheduler to migrate tasks off this CPU */
        tell_sched_cpu_down_begin(cpu);

        init_completion(&hp->synced);
        init_completion(&hp->unplug_wait);

        hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
        if (IS_ERR(hp->sync_tsk)) {
                err = PTR_ERR(hp->sync_tsk);
                hp->sync_tsk = NULL;
                return err;
        }
        kthread_bind(hp->sync_tsk, cpu);

        /*
         * Wait for tasks to get out of the pinned sections,
         * it's still OK if new tasks enter. Some CPU notifiers will
         * wait for tasks that are going to enter these sections and
         * we must not have them block.
         */
        wake_up_process(hp->sync_tsk);
        return 0;
}
static void cpu_unplug_sync(unsigned int cpu)
{
        struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

        init_completion(&hp->synced);
        /* The completion needs to be initialized before setting grab_lock */
        smp_wmb();

        /* Grab the mutex before setting grab_lock */
        hotplug_lock(hp);
        hp->grab_lock = 1;

        /*
         * The CPU notifiers have been completed.
         * Wait for tasks to get out of pinned CPU sections and have new
         * tasks block until the CPU is completely down.
         */
        __cpu_unplug_sync(hp);

        /* All done with the sync thread */
        kthread_stop(hp->sync_tsk);
        hp->sync_tsk = NULL;
}

static void cpu_unplug_done(unsigned int cpu)
{
        struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

        hp->unplug = NULL;
        /* Let all tasks know cpu unplug is finished before cleaning up */
        smp_wmb();

        if (hp->sync_tsk)
                kthread_stop(hp->sync_tsk);

        if (hp->grab_lock) {
                hotplug_unlock(hp);
                /* protected by cpu_hotplug.lock */
                hp->grab_lock = 0;
        }
        tell_sched_cpu_down_done(cpu);
}
void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
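/*
 * Example (illustrative sketch, not part of the original file): the usual
 * reader-side pattern.  Code that walks cpu_online_mask and must not race
 * with a CPU going away brackets the walk with get/put_online_cpus().  The
 * example_* name is hypothetical.
 */
static void __maybe_unused example_walk_online_cpus(void)
{
        unsigned int cpu;

        get_online_cpus();              /* hold off cpu_up()/cpu_down() */
        for_each_online_cpu(cpu)
                pr_info("example: cpu%u is online\n", cpu);
        put_online_cpus();              /* writers may proceed again */
}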
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
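/*
 * Example (illustrative sketch, not part of the original file): pairing
 * cpu_hotplug_disable()/cpu_hotplug_enable() around a section that must see a
 * stable set of online CPUs.  While disabled, cpu_up()/cpu_down() return
 * -EBUSY, as described above.  The example_* name is hypothetical.
 */
static void __maybe_unused example_no_hotplug_section(void)
{
        cpu_hotplug_disable();
        /* ... work that must not race with CPUs coming or going ... */
        cpu_hotplug_enable();
}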
#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
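/*
 * Example (illustrative sketch, not part of the original file): a callback on
 * this chain that prepares per-CPU state on CPU_UP_PREPARE, can veto a
 * cpu_down() from CPU_DOWN_PREPARE by returning an errno wrapped with
 * notifier_from_errno(), and releases state once CPU_DEAD arrives.  The
 * example_* name is hypothetical.
 */
static int __maybe_unused example_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                pr_info("example: allocating state for cpu%u\n", cpu);
                break;
        case CPU_DOWN_PREPARE:
                if (false /* e.g. state on @cpu still busy */)
                        return notifier_from_errno(-EBUSY);
                break;
        case CPU_DOWN_FAILED:
                pr_info("example: cpu%u stays online, undo DOWN_PREPARE\n", cpu);
                break;
        case CPU_DEAD:
                pr_info("example: releasing state of dead cpu%u\n", cpu);
                break;
        }
        return NOTIFY_OK;
}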
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so that we do not warn about a task
                 * which was running on this cpu in the past and has just
                 * been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        }
        read_unlock(&tasklist_lock);
}
struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        stop_machine_park((long)param->hcpu);
        return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int mycpu, err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };
        cpumask_var_t cpumask;
        cpumask_var_t cpumask_org;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        /* Move the downtaker off the unplug cpu */
        if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
                return -ENOMEM;
        if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
                free_cpumask_var(cpumask);
                return -ENOMEM;
        }

        cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
        cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
        set_cpus_allowed_ptr(current, cpumask);
        free_cpumask_var(cpumask);
        migrate_disable();
        mycpu = smp_processor_id();
        if (mycpu == cpu) {
                printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
                migrate_enable();
                err = -EBUSY;
                goto restore_cpus;
        }
        migrate_enable();

        cpu_hotplug_begin();
        err = cpu_unplug_begin(cpu);
        if (err) {
                printk("cpu_unplug_begin(%d) failed\n", cpu);
                goto out_cancel;
        }

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so wait for both.
         *
         * Do sync before park smpboot threads to take care the rcu boost case.
         */
        if (IS_ENABLED(CONFIG_PREEMPT))
                synchronize_rcu_mult(call_rcu, call_rcu_sched);
        else
                synchronize_rcu();

        __cpu_unplug_wait(cpu);
        smpboot_park_threads(cpu);

        /* Notifiers are done. Don't let any more tasks pin this CPU. */
        cpu_unplug_sync(cpu);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone. Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                irq_unlock_sparse();
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!per_cpu(cpu_dead_idle, cpu))
                cpu_relax();
        smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
        per_cpu(cpu_dead_idle, cpu) = false;

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone. Too late to complain. */
        tick_cleanup_dead_cpu(cpu);
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_unplug_done(cpu);
out_cancel:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
restore_cpus:
        set_cpus_allowed_ptr(current, cpumask_org);
        free_cpumask_var(cpumask_org);
        return err;
}

int cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();
        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smpboot_unpark_threads(cpu);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
        .notifier_call = smpboot_thread_call,
        .priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
        register_cpu_notifier(&smpboot_thread_notifier);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();
        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();
        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();
        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
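/*
 * Example (illustrative sketch, not part of the original file): how a single
 * row of cpu_bit_bitmap[] becomes a full NR_CPUS-bit mask with only bit @cpu
 * set.  This mirrors what get_cpu_mask()/cpumask_of() do in
 * <linux/cpumask.h>; the example_* name is hypothetical.
 */
static inline const struct cpumask *example_cpumask_of(unsigned int cpu)
{
        /* Row whose word[0] is 1UL << (cpu % BITS_PER_LONG) ... */
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

        /*
         * ... backed up by cpu/BITS_PER_LONG words, so the set bit lands in
         * word cpu/BITS_PER_LONG of the returned mask.  The words in front of
         * each row (including the all-zero row 0) are guaranteed to be 0.
         */
        p -= cpu / BITS_PER_LONG;
        return to_cpumask(p);
}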
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}
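/*
 * Example (illustrative sketch, not part of the original file): how early
 * architecture setup code typically seeds these masks once it has probed the
 * number of CPUs, before any secondary CPU is brought online.  The example_*
 * name and the ncpus parameter are hypothetical.
 */
static void __init __maybe_unused example_seed_cpu_masks(unsigned int ncpus)
{
        unsigned int cpu;

        for (cpu = 0; cpu < ncpus && cpu < nr_cpu_ids; cpu++) {
                set_cpu_possible(cpu, true);    /* may ever be brought up */
                set_cpu_present(cpu, true);     /* physically there right now */
        }
        /* The boot CPU is already running, so it is also online. */
        set_cpu_online(smp_processor_id(), true);
}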