/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>
/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct task_struct	*waiter;	/* woken when nr_todo reaches 0 */
};
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
};
static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false;
/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	done->waiter = current;
}
/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo)) {
			wake_up_process(done->waiter);
			done->waiter = NULL;
		}
	}
}
/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(p);
	} else
		cpu_stop_signal_done(work->done, false);

	raw_spin_unlock_irqrestore(&stopper->lock, flags);
}
static void wait_for_stop_done(struct cpu_stop_done *done)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (atomic_read(&done->nr_todo)) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	/*
	 * We need to wait until cpu_stop_signal_done() has cleared
	 * done->waiter.
	 */
	while (done->waiter)
		cpu_relax();
	set_current_state(TASK_RUNNING);
}
/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_stop_done(&done);
	return done.executed ? done.ret : -ENOENT;
}
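
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * hands stop_one_cpu() a short, non-sleeping callback and gets back the
 * callback's return value, or -ENOENT if the target cpu was offline.
 * example_fn() and example_run_on() are hypothetical names.
 */
#if 0
static int example_fn(void *arg)
{
	/* Runs with the target cpu fully monopolized; must not sleep. */
	int *counter = arg;

	(*counter)++;
	return 0;
}

static int example_run_on(unsigned int cpu)
{
	int counter = 0;

	return stop_one_cpu(cpu, example_fn, &counter);
}
#endif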
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};
static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}
/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
struct irq_cpu_stop_queue_work_info {
	int cpu1;
	int cpu2;
	struct cpu_stop_work *work1;
	struct cpu_stop_work *work2;
};
/*
 * This function is always run with irqs and preemption disabled.
 * This guarantees that both work1 and work2 get queued, before
 * our local migrate thread gets the chance to preempt us.
 */
static void irq_cpu_stop_queue_work(void *arg)
{
	struct irq_cpu_stop_queue_work_info *info = arg;

	cpu_stop_queue_work(info->cpu1, info->work1);
	cpu_stop_queue_work(info->cpu2, info->work2);
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct irq_cpu_stop_queue_work_info call_args;
	struct multi_stop_data msdata;

	preempt_disable_nort();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	call_args = (struct irq_cpu_stop_queue_work_info){
		.cpu1 = cpu1,
		.cpu2 = cpu2,
		.work1 = &work1,
		.work2 = &work2,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * If we observe both CPUs active we know _cpu_down() cannot yet have
	 * queued its stop_machine works and therefore ours will get executed
	 * first. Or it's not either one of our CPUs that's getting unplugged,
	 * in which case we don't care.
	 *
	 * This relies on the stopper workqueues to be FIFO.
	 */
	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
		preempt_enable_nort();
		return -ENOENT;
	}

	lg_local_lock(&stop_cpus_lock);
	/*
	 * Queuing needs to be done by the lowest numbered CPU, to ensure
	 * that works are always queued in the same order on every CPU.
	 * This prevents deadlocks.
	 */
	smp_call_function_single(min(cpu1, cpu2),
				 &irq_cpu_stop_queue_work,
				 &call_args, 1);
	lg_local_unlock(&stop_cpus_lock);
	preempt_enable_nort();

	wait_for_stop_done(&done);

	return done.executed ? done.ret : -ENOENT;
}
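
/*
 * Illustrative usage sketch: stop_two_cpus() suits callers that need a
 * consistent view across exactly two cpus at once; in mainline the
 * scheduler's cross-cpu task swap is such a user.  example_swap_fn() and
 * example_swap() are hypothetical names.
 */
#if 0
static int example_swap_fn(void *arg)
{
	/* Both cpus are captive in multi_cpu_stop() with irqs disabled. */
	return 0;
}

static int example_swap(unsigned int cpu1, unsigned int cpu2)
{
	return stop_two_cpus(cpu1, cpu2, example_swap_fn, NULL);
}
#endif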
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}
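
/*
 * Illustrative usage sketch: since stop_one_cpu_nowait() returns before
 * @fn runs, @work_buf must stay valid until the stopper consumes it.  A
 * static per-cpu buffer, as sketched below with the hypothetical names
 * example_work and example_kick(), is one way to guarantee that.
 */
#if 0
static DEFINE_PER_CPU(struct cpu_stop_work, example_work);

static void example_kick(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	/* The per-cpu buffer outlives the request, so this is safe. */
	stop_one_cpu_nowait(cpu, fn, arg, &per_cpu(example_work, cpu));
}
#endif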
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done, bool inactive)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
	}

	/*
	 * Make sure that all work is queued on all cpus before
	 * any of the cpus can execute it.
	 */
	if (!inactive)
		lg_global_lock(&stop_cpus_lock);
	else
		lg_global_trylock_relax(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
	lg_global_unlock(&stop_cpus_lock);
}
static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done, false);
	wait_for_stop_done(&done);
	return done.executed ? done.ret : -ENOENT;
}
/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
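
/*
 * Illustrative usage sketch: because stop_cpus() calls are serialized by
 * stop_cpus_mutex, @fn may safely rendezvous with its siblings, e.g. via
 * a shared counter.  All names below are hypothetical, and a real caller
 * would pin the online mask (get_online_cpus()) around the setup.
 */
#if 0
static atomic_t example_pending;

static int example_rendezvous_fn(void *arg)
{
	/* Every participating cpu runs this; wait until all have arrived. */
	atomic_dec(&example_pending);
	while (atomic_read(&example_pending))
		cpu_relax();
	return 0;
}

static int example_stop_all(void)
{
	atomic_set(&example_pending, num_online_cpus());
	return stop_cpus(cpu_online_mask, example_rendezvous_fn, NULL);
}
#endif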
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}
static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/*
		 * Wait until the stopper finished scheduling on all
		 * cpus
		 */
		lg_global_lock(&stop_cpus_lock);
		/*
		 * Let other cpu threads continue as well
		 */
		lg_global_unlock(&stop_cpus_lock);

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		/*
		 * Make sure that the wakeup and setting done->waiter
		 * to NULL is atomic.
		 */
		local_irq_disable();
		cpu_stop_signal_done(done, true);
		local_irq_enable();
		goto repeat;
	}
}
extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
}
static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	unsigned long flags;

	/* drain remaining works */
	raw_spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry(work, &stopper->works, list)
		cpu_stop_signal_done(work->done, false);
	stopper->enabled = false;
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
}
static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	raw_spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	raw_spin_unlock_irq(&stopper->lock);
}
static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper_task,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
	.selfparking		= true,
};
static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);
#ifdef CONFIG_STOP_MACHINE
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
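
/*
 * Illustrative usage sketch: stop_machine() is the heavy hammer for
 * updates that no other cpu may observe half-done (e.g. code patching).
 * While the callback runs, every other online cpu spins in
 * multi_cpu_stop() with interrupts disabled.  All names are hypothetical.
 */
#if 0
static int example_patch_fn(void *arg)
{
	/* The system is quiescent here; keep the work short. */
	return 0;
}

static int example_patch(void)
{
	/* NULL @cpus: run example_patch_fn on the first online cpu. */
	return stop_machine(example_patch_fn, NULL, NULL);
}
#endif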
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done, true);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (atomic_read(&done.nr_todo))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

#endif	/* CONFIG_STOP_MACHINE */