/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

MODULE_ALIAS("rcupdate");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

module_param(rcu_expedited, int, 0);

#ifndef CONFIG_TINY_RCU

static atomic_t rcu_expedited_nesting =
	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable into account as well as the rcu_expedite_gp()
 * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
 * returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
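
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that wants faster grace periods around a bounded reconfiguration step
 * could pair these calls; my_reconfigure() below is a hypothetical helper
 * whose internal synchronize_rcu() calls would then be expedited:
 *
 *	rcu_expedite_gp();
 *	my_reconfigure();
 *	rcu_unexpedite_gp();
 *
 * The nesting count is a plain atomic counter, so each rcu_expedite_gp()
 * must be balanced by exactly one rcu_unexpedite_gp().
 */
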
#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
		rcu_unexpedite_gp();
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
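
/*
 * Illustrative reader sketch (not from the original file): whichever
 * implementation of __rcu_read_lock()/__rcu_read_unlock() is in use, a
 * typical read-side critical section looks like the following, where gp,
 * struct foo, and do_something_with() are hypothetical:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The matching rcu_read_lock() and rcu_read_unlock() must execute in the
 * same context, as noted in the rcu_read_lock_held() documentation below.
 */
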
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
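
/*
 * Illustrative debug-check sketch (not from the original file): a function
 * that requires rcu_read_lock() protection can combine rcu_read_lock_held()
 * with rcu_lockdep_assert(); my_rcu_walk() is a hypothetical caller:
 *
 *	static void my_rcu_walk(void)
 *	{
 *		rcu_lockdep_assert(rcu_read_lock_held(),
 *				   "my_rcu_walk() needs rcu_read_lock()");
 *		...
 *	}
 *
 * Under !CONFIG_DEBUG_LOCK_ALLOC this check is permissive rather than
 * precise, as described above.
 */
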
#ifndef CONFIG_PREEMPT_RT_FULL
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
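
/*
 * Illustrative sketch (not from the original file): wait_rcu_gp() turns any
 * call_rcu()-style function into a synchronous wait, which is how the
 * synchronize_*() family can be expressed.  For example, a caller could
 * wait for a normal RCU grace period with:
 *
 *	wait_rcu_gp(call_rcu);
 *
 * The rcu_head and completion live on the caller's stack, hence the
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() pairing above.
 */
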
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup.  We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;
	default:
		return 1;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that was previously initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_activate = rcuhead_fixup_activate,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
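
/*
 * Illustrative sketch (not from the original file): because this file sets
 * MODULE_PARAM_PREFIX to "rcupdate.", these knobs are normally adjusted
 * from the boot command line or via sysfs, for example:
 *
 *	rcupdate.rcu_cpu_stall_timeout=60
 *	echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress
 *
 * rcu_jiffies_till_stall_check() below clamps the timeout to the 3..300
 * second range before converting it to jiffies.
 */
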
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	if (needwake) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
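
/*
 * Illustrative usage sketch (not from the original file): a caller frees
 * memory only after every task has passed through a voluntary context
 * switch, usermode execution, or idle.  Here struct my_trampoline and
 * my_trampoline_free_cb() are hypothetical:
 *
 *	struct my_trampoline {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_trampoline_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_trampoline *tp =
 *			container_of(rhp, struct my_trampoline, rh);
 *
 *		kfree(tp);
 *	}
 *
 *	...
 *	call_rcu_tasks(&tp->rh, my_trampoline_free_cb);
 */
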
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	rcu_lockdep_assert(rcu_scheduler_active,
			   "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
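
/*
 * Illustrative sketch (not from the original file): the typical
 * tracing-style pattern is to unpublish a code trampoline, wait for all
 * tasks to have voluntarily switched away from it, and only then free it;
 * unpublish_trampoline() and the tramp pointer are hypothetical:
 *
 *	unpublish_trampoline(tramp);
 *	synchronize_rcu_tasks();
 *	kfree(tramp);
 */
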
/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
	    !ACCESS_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && ACCESS_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
				ACCESS_ONCE(t->rcu_tasks_holdout) = true;
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = ACCESS_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	static struct task_struct *rcu_tasks_kthread_ptr;
	struct task_struct *t;

	if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);
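
/*
 * Illustrative sketch (not from the original file): with the "rcupdate."
 * parameter prefix in effect, the early-boot self tests are enabled from
 * the kernel command line, for example:
 *
 *	rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1
 *
 * The 0444 permissions make these read-only at run time, which matches
 * their boot-only purpose.
 */
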
static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */