/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010       SUSE Linux Products GmbH
 * Copyright (C) 2010       Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
        atomic_t                nr_todo;        /* nr left to execute */
        bool                    executed;       /* actually executed? */
        int                     ret;            /* collected return value */
        struct completion       completion;     /* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
        struct task_struct      *thread;

        raw_spinlock_t          lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */

        struct cpu_stop_work    stop_work;      /* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
        memset(done, 0, sizeof(*done));
        atomic_set(&done->nr_todo, nr_todo);
        init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
        if (done) {
                if (executed)
                        done->executed = true;
                if (atomic_dec_and_test(&done->nr_todo))
                        complete(&done->completion);
        }
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
                                  struct cpu_stop_work *work)
{
        list_add_tail(&work->list, &stopper->works);
        wake_up_process(stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        unsigned long flags;

        raw_spin_lock_irqsave(&stopper->lock, flags);
        if (stopper->enabled)
                __cpu_stop_queue_work(stopper, work);
        else
                cpu_stop_signal_done(work->done, false);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

        cpu_stop_init_done(&done, 1);
        cpu_stop_queue_work(cpu, &work);
        wait_for_completion(&done.completion);
        return done.executed ? done.ret : -ENOENT;
}

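/*
 * Usage sketch (illustrative only, not part of the original file;
 * example_fn() is a hypothetical callback).  @fn runs from the stopper
 * thread with preemption disabled, so it must not sleep:
 *
 *      static int example_fn(void *arg)
 *      {
 *              int *cpu_seen = arg;
 *
 *              *cpu_seen = smp_processor_id();
 *              return 0;
 *      }
 *
 *      int cpu_seen = -1;
 *      int err = stop_one_cpu(3, example_fn, &cpu_seen);
 *
 * err is example_fn()'s return value, or -ENOENT if cpu 3 was offline
 * and the callback never ran.
 */
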
/* This controls the threads on each CPU. */
enum multi_stop_state {
        /* Dummy starting state for thread. */
        MULTI_STOP_NONE,
        /* Awaiting everyone to be scheduled. */
        MULTI_STOP_PREPARE,
        /* Disable interrupts. */
        MULTI_STOP_DISABLE_IRQ,
        /* Run the function */
        MULTI_STOP_RUN,
        /* Exit */
        MULTI_STOP_EXIT,
};

struct multi_stop_data {
        cpu_stop_fn_t           fn;
        void                    *data;
        /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
        unsigned int            num_threads;
        const struct cpumask    *active_cpus;

        enum multi_stop_state   state;
        atomic_t                thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
                      enum multi_stop_state newstate)
{
        /* Reset ack counter. */
        atomic_set(&msdata->thread_ack, msdata->num_threads);
        smp_wmb();
        msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
        if (atomic_dec_and_test(&msdata->thread_ack))
                set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
        struct multi_stop_data *msdata = data;
        enum multi_stop_state curstate = MULTI_STOP_NONE;
        int cpu = smp_processor_id(), err = 0;
        unsigned long flags;
        bool is_active;

        /*
         * When called from stop_machine_from_inactive_cpu(), irq might
         * already be disabled.  Save the state and restore it on exit.
         */
        local_save_flags(flags);

        if (!msdata->active_cpus)
                is_active = cpu == cpumask_first(cpu_online_mask);
        else
                is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

        /* Simple state machine */
        do {
                /* Chill out and ensure we re-read multi_stop_state. */
                cpu_relax();
                if (msdata->state != curstate) {
                        curstate = msdata->state;
                        switch (curstate) {
                        case MULTI_STOP_DISABLE_IRQ:
                                local_irq_disable();
                                hard_irq_disable();
                                break;
                        case MULTI_STOP_RUN:
                                if (is_active)
                                        err = msdata->fn(msdata->data);
                                break;
                        default:
                                break;
                        }
                        ack_state(msdata);
                }
        } while (curstate != MULTI_STOP_EXIT);

        local_irq_restore(flags);
        return err;
}

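/*
 * Illustrative walk-through (commentary added here, not in the original
 * file): with two threads, set_state() resets thread_ack to 2 before
 * publishing each state, and the last thread to ack advances the state,
 * so all participating CPUs move through the states in lock step:
 *
 *      MULTI_STOP_PREPARE      every stopper thread spins in
 *                              multi_cpu_stop()
 *      MULTI_STOP_DISABLE_IRQ  all disable interrupts, then ack
 *      MULTI_STOP_RUN          only the cpu(s) in @active_cpus run
 *                              msdata->fn(msdata->data)
 *      MULTI_STOP_EXIT         everyone restores irq flags and returns
 */
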
static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
                                    int cpu2, struct cpu_stop_work *work2)
{
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
        int err;

        lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
        raw_spin_lock_irq(&stopper1->lock);
        raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

        err = -ENOENT;
        if (!stopper1->enabled || !stopper2->enabled)
                goto unlock;

        err = 0;
        __cpu_stop_queue_work(stopper1, work1);
        __cpu_stop_queue_work(stopper2, work2);
unlock:
        raw_spin_unlock(&stopper2->lock);
        raw_spin_unlock_irq(&stopper1->lock);
        lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

        return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work1, work2;
        struct multi_stop_data msdata;

        preempt_disable_nort();
        msdata = (struct multi_stop_data){
                .fn = fn,
                .data = arg,
                .num_threads = 2,
                .active_cpus = cpumask_of(cpu1),
        };

        work1 = work2 = (struct cpu_stop_work){
                .fn = multi_cpu_stop,
                .arg = &msdata,
                .done = &done
        };

        cpu_stop_init_done(&done, 2);
        set_state(&msdata, MULTI_STOP_PREPARE);

        if (cpu1 > cpu2)
                swap(cpu1, cpu2);
        if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
                preempt_enable_nort();
                return -ENOENT;
        }

        preempt_enable_nort();

        wait_for_completion(&done.completion);

        return done.executed ? done.ret : -ENOENT;
}

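/*
 * Usage sketch (illustrative only; example_swap() is hypothetical).
 * Both CPUs end up spinning in multi_cpu_stop() with irqs disabled
 * while @fn runs on @cpu1:
 *
 *      static int example_swap(void *arg)
 *      {
 *              return 0;
 *      }
 *
 *      int err = stop_two_cpus(1, 2, example_swap, NULL);
 *
 * The scheduler's migrate_swap() is the in-tree user: it freezes a pair
 * of runqueues together so two tasks can trade CPUs atomically.
 */
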
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                         struct cpu_stop_work *work_buf)
{
        *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
        cpu_stop_queue_work(cpu, work_buf);
}

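/*
 * Usage sketch (illustrative only; the example_*() names are
 * hypothetical).  Because nothing waits for completion, @work_buf must
 * outlive the request, so it is typically embedded in a longer-lived
 * object instead of living on the stack:
 *
 *      struct example_ctx {
 *              struct cpu_stop_work stop_work;
 *              int cpu;
 *      };
 *
 *      static void example_kick(struct example_ctx *ctx)
 *      {
 *              stop_one_cpu_nowait(ctx->cpu, example_fn, ctx,
 *                                  &ctx->stop_work);
 *      }
 *
 * The scheduler uses this pattern from active load balancing, where the
 * caller holds a runqueue lock and cannot sleep.
 */
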
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
                                 cpu_stop_fn_t fn, void *arg,
                                 struct cpu_stop_done *done, bool inactive)
{
        struct cpu_stop_work *work;
        unsigned int cpu;

        /*
         * Make sure that all work is queued on all cpus before
         * any of the cpus can execute it.
         */
        if (!inactive)
                lg_global_lock(&stop_cpus_lock);
        else
                lg_global_trylock_relax(&stop_cpus_lock);

        for_each_cpu(cpu, cpumask) {
                work = &per_cpu(cpu_stopper.stop_work, cpu);
                work->fn = fn;
                work->arg = arg;
                work->done = done;
                cpu_stop_queue_work(cpu, work);
        }
        lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
                       cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;

        cpu_stop_init_done(&done, cpumask_weight(cpumask));
        queue_stop_cpus_work(cpumask, fn, arg, &done, false);
        wait_for_completion(&done.completion);
        return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
        int ret;

        /* static works are used, process one request at a time */
        mutex_lock(&stop_cpus_mutex);
        ret = __stop_cpus(cpumask, fn, arg);
        mutex_unlock(&stop_cpus_mutex);
        return ret;
}

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
        int ret;

        /* static works are used, process one request at a time */
        if (!mutex_trylock(&stop_cpus_mutex))
                return -EAGAIN;
        ret = __stop_cpus(cpumask, fn, arg);
        mutex_unlock(&stop_cpus_mutex);
        return ret;
}

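/*
 * Usage sketch (illustrative only; example_fn is hypothetical).  A
 * caller that must not block on stop_cpus_mutex can back off and retry
 * later:
 *
 *      int err = try_stop_cpus(cpu_online_mask, example_fn, NULL);
 *
 * err is -EAGAIN when another stop_cpus() caller holds the facility;
 * otherwise it behaves exactly like stop_cpus().
 */
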
static int cpu_stop_should_run(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        unsigned long flags;
        int run;

        raw_spin_lock_irqsave(&stopper->lock, flags);
        run = !list_empty(&stopper->works);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
        return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct cpu_stop_work *work;
        int ret;

repeat:
        work = NULL;
        raw_spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
                work = list_first_entry(&stopper->works,
                                        struct cpu_stop_work, list);
                list_del_init(&work->list);
        }
        raw_spin_unlock_irq(&stopper->lock);

        if (work) {
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
                char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

                /*
                 * Wait until the stopper finished scheduling on all
                 * cpus
                 */
                lg_global_lock(&stop_cpus_lock);
                /*
                 * Let other cpu threads continue as well
                 */
                lg_global_unlock(&stop_cpus_lock);

                /* cpu stop callbacks are not allowed to sleep */
                preempt_disable();

                ret = fn(arg);
                if (ret)
                        done->ret = ret;

                /* restore preemption and check it's still balanced */
                preempt_enable();
                WARN_ONCE(preempt_count(),
                          "cpu_stop: %s(%p) leaked preempt count\n",
                          kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
                                          ksym_buf), arg);

                cpu_stop_signal_done(done, true);
                goto repeat;
        }
}

void stop_machine_park(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        /*
         * Lockless. cpu_stopper_thread() will take stopper->lock and flush
         * the pending works before it parks, until then it is fine to queue
         * the new works.
         */
        stopper->enabled = false;
        kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
        sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        stopper->enabled = true;
        kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
        .store                  = &cpu_stopper.thread,
        .thread_should_run      = cpu_stop_should_run,
        .thread_fn              = cpu_stopper_thread,
        .thread_comm            = "migration/%u",
        .create                 = cpu_stop_create,
        .park                   = cpu_stop_park,
        .selfparking            = true,
};

static int __init cpu_stop_init(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

                raw_spin_lock_init(&stopper->lock);
                INIT_LIST_HEAD(&stopper->works);
        }

        lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");

        BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
        stop_machine_unpark(raw_smp_processor_id());
        stop_machine_initialized = true;
        return 0;
}
early_initcall(cpu_stop_init);

#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)

static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
        struct multi_stop_data msdata = {
                .fn = fn,
                .data = data,
                .num_threads = num_online_cpus(),
                .active_cpus = cpus,
        };

        if (!stop_machine_initialized) {
                /*
                 * Handle the case where stop_machine() is called
                 * early in boot before stop_machine() has been
                 * initialized.
                 */
                unsigned long flags;
                int ret;

                WARN_ON_ONCE(msdata.num_threads != 1);

                local_irq_save(flags);
                hard_irq_disable();
                ret = (*fn)(data);
                local_irq_restore(flags);

                return ret;
        }

        /* Set the initial state and stop all online cpus. */
        set_state(&msdata, MULTI_STOP_PREPARE);
        return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
        int ret;

        /* No CPUs can come up or down during this. */
        get_online_cpus();
        ret = __stop_machine(fn, data, cpus);
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

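/*
 * Usage sketch (illustrative only; example_update() is hypothetical).
 * @fn runs on one CPU while every other online CPU spins with irqs
 * disabled, so no CPU can observe the update half-done:
 *
 *      static int example_update(void *arg)
 *      {
 *              unsigned long *word = arg;
 *
 *              *word = ~*word;
 *              return 0;
 *      }
 *
 *      unsigned long shared_word = 0;
 *      int err = stop_machine(example_update, &shared_word, NULL);
 *
 * Passing NULL for @cpus makes @fn run on the first online CPU; passing
 * a cpumask runs it on every cpu in the mask instead.
 */
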
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
                                   const struct cpumask *cpus)
{
        struct multi_stop_data msdata = { .fn = fn, .data = data,
                                          .active_cpus = cpus };
        struct cpu_stop_done done;
        int ret;

        /* Local CPU must be inactive and CPU hotplug in progress. */
        BUG_ON(cpu_active(raw_smp_processor_id()));
        msdata.num_threads = num_active_cpus() + 1;     /* +1 for local */

        /* No proper task established and can't sleep - busy wait for lock. */
        while (!mutex_trylock(&stop_cpus_mutex))
                cpu_relax();

        /* Schedule work on other CPUs and execute directly for local CPU */
        set_state(&msdata, MULTI_STOP_PREPARE);
        cpu_stop_init_done(&done, num_active_cpus());
        queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
                             &done, true);
        ret = multi_cpu_stop(&msdata);

        /* Busy wait for completion. */
        while (!completion_done(&done.completion))
                cpu_relax();

        mutex_unlock(&stop_cpus_mutex);
        return ret ?: done.ret;
}

#endif  /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */