/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(bool, verbose, true,
	      "Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
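/*
 * Example invocation (illustrative only; the parameter values below are
 * arbitrary):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * Any of the torture_param() parameters above can be set the same way on
 * the modprobe command line, or, when rcutorture is built in, as
 * rcutorture.<param>=<value> on the kernel boot line.
 */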
static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
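/*
 * Lifecycle sketch of an rcu_torture element (a summary of the code below,
 * not additional mechanism): the writer allocates an element from the
 * freelist, publishes it via rcu_torture_current, and later replaces it.
 * Once replaced, the element's rtort_pipe_count is incremented once per
 * subsequent grace period until it reaches RCU_TORTURE_PIPE_LEN, at which
 * point the element is returned to the freelist.  Any reader observing
 * rtort_pipe_count > 1 has therefore spanned a grace period, which
 * convicts the RCU implementation under test.
 */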
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_count) = { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_batch) = { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes;
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
static int torture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();
	unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);

	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	unsigned long (*started)(void);
	unsigned long (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int irq_capable;
	int can_boost;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
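/*
 * To torture an additional synchronization flavor, define another
 * rcu_torture_ops instance (as for rcu_ops and friends below) and list
 * it in the torture_ops[] array in rcu_torture_init(); the torture_type
 * module parameter then selects it by its ->name field.
 */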
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the
	 * grace period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}
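/*
 * Worked example of the probabilities above, assuming nrealreaders == 4:
 * the 50-millisecond delay fires about once per 4 * 2000 * 50 = 400,000
 * calls, the 200-microsecond delay about once per 4 * 2 * 200 = 1,600
 * calls, and (under CONFIG_PREEMPT) the preempt_schedule() about once
 * per 4 * 20000 = 80,000 calls.
 */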
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.started	= rcu_batches_started,
	.completed	= rcu_batches_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};
#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.ttype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.started	= rcu_batches_started_bh,
	.completed	= rcu_batches_completed_bh,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

#else
static struct rcu_torture_ops rcu_bh_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
};
#endif
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.started	= rcu_no_completed,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	int cpu;
	int idx = srcu_ctlp->completed & 0x1;

	pr_alert("%s%s per-CPU(idx=%d):",
		 torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		long c0, c1;

		c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx];
		c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx];
		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
	}
	pr_cont("\n");
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.started	= NULL,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.started	= NULL,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcud"
};
/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.ttype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.started	= rcu_batches_started_sched,
	.completed	= rcu_batches_completed_sched,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.get_state	= get_state_synchronize_sched,
	.cond_sync	= cond_synchronize_sched,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};
#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.started	= rcu_no_completed,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

#define RCUTORTURE_TASKS_OPS &tasks_ops,

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

#else /* #ifdef CONFIG_TASKS_RCU */

#define RCUTORTURE_TASKS_OPS

static bool __maybe_unused torturing_tasks(void)
{
	return false;
}

#endif /* #else #ifdef CONFIG_TASKS_RCU */
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
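/*
 * The failure criterion above, spelled out: a boost-test interval lasts
 * test_boost_duration seconds, and a callback that remains outstanding
 * for more than test_boost_duration * HZ - HZ / 2 jiffies (3.5 seconds
 * with the default test_boost_duration=4) is charged as a boost failure,
 * on the theory that priority boosting should have gotten it invoked
 * well before then.
 */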
static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu() or analogous, increasing the probability of occurrence
 * of callback-overflow corner cases.
 */
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(sizeof(*rhp) *
			      cbflood_n_burst * cbflood_n_per_burst);
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}
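/*
 * Volume arithmetic for the defaults above: cbflood_n_burst=3 bursts of
 * cbflood_n_per_burst=20000 callbacks means each flood posts 60,000
 * callbacks, all backed by a single up-front vmalloc() of
 * 60,000 * sizeof(struct rcu_head) bytes, with cur_ops->cb_barrier()
 * draining them before the next flood starts.
 */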
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
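/*
 * Burst shape, by way of example: fqs_duration=100 with fqs_holdoff=10
 * (both in microseconds) yields roughly 100 / 10 = 10 calls to
 * cur_ops->fqs() per burst, with one burst every fqs_stutter (default 3)
 * seconds.
 */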
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite) {
		pr_alert("%s" TORTURE_FLAG
			 " Grace periods expedited from boot/sysfs for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " Testing of dynamic grace-period expediting disabled.\n",
			 torture_type);
	}

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
		synctype[nsynctypes++] = RTWS_COND_GET;
	else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync))
		pr_alert("rcu_torture_writer: gp_cond without primitives.\n");
	if (gp_exp1 && cur_ops->exp_sync)
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
	else if (gp_exp && !cur_ops->exp_sync)
		pr_alert("rcu_torture_writer: gp_exp without primitives.\n");
	if (gp_normal1 && cur_ops->deferred_free)
		synctype[nsynctypes++] = RTWS_DEF_FREE;
	else if (gp_normal && !cur_ops->deferred_free)
		pr_alert("rcu_torture_writer: gp_normal without primitives.\n");
	if (gp_sync1 && cur_ops->sync)
		synctype[nsynctypes++] = RTWS_SYNC;
	else if (gp_sync && !cur_ops->sync)
		pr_alert("rcu_torture_writer: gp_sync without primitives.\n");
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}
	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		stutter_wait("rcu_torture_writer");
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
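/*
 * For example, with the default torture_type=rcu all four update-side
 * methods are available, so each pass through the writer loop above
 * randomly picks among call_rcu() (RTWS_DEF_FREE),
 * synchronize_rcu_expedited() (RTWS_EXP_SYNC),
 * get_state_synchronize_rcu()/cond_synchronize_rcu()
 * (RTWS_COND_GET/RTWS_COND_SYNC), and synchronize_rcu() (RTWS_SYNC).
 */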
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (torture_random(&rand) & 0x80)
				cur_ops->sync();
			else
				cur_ops->exp_sync();
		} else if (gp_normal) {
			cur_ops->sync();
		} else {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	ftrace_dump(DUMP_ALL);
}
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	unsigned long started;
	unsigned long completed;
	static DEFINE_TORTURE_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	if (cur_ops->started)
		started = cur_ops->started();
	else
		started = cur_ops->completed();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->completed();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  started, completed);
		rcutorture_trace_dump();
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = completed - started;
	if (cur_ops->started)
		completed++;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long started;
	unsigned long completed;
	int idx;
	DEFINE_TORTURE_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;
	unsigned long long ts;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		if (cur_ops->started)
			started = cur_ops->started();
		else
			started = cur_ops->completed();
		ts = rcu_trace_clock_local();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(srcu_ctlp) ||
					  torturing_tasks());
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		completed = cur_ops->completed();
		if (pipe_count > 1) {
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
						  ts, started, completed);
			rcutorture_trace_dump();
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = completed - started;
		if (cur_ops->started)
			completed++;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		n_rcu_torture_timers);
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);
	pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags;
		unsigned long __maybe_unused gpnum;
		unsigned long __maybe_unused completed;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gpnum, &completed);
		pr_alert("??? Writer stall state %d g%lu c%lu f%#x\n",
			 rcu_torture_writer_state,
			 gpnum, completed, flags);
		show_rcu_gp_kthreads();
		rcutorture_trace_dump();
	}
	rtcv_snap = rcu_torture_current_version;
}
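/*
 * Key to the one-letter soup printed above (see also
 * Documentation/RCU/torture.txt): "rtc" is the current writer-visible
 * element, "ver" the number of writer updates, "tfle" non-zero if the
 * freelist is empty, "rta"/"rtaf"/"rtf" the counts of allocations,
 * allocation failures, and frees, "rtmbe" memory-barrier errors (any
 * non-zero value indicates a bug), "rtbke"/"rtbre"/"rtbf"/"rtb" the
 * boost-test kthread-creation, priority-setting, failure, and attempt
 * counts, and "nt" the number of reads performed from the timer handler.
 */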
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}
static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		pr_alert("rcu_torture_stall start.\n");
		rcu_read_lock();
		preempt_disable();
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
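/*
 * Example expectation: stall_cpu=30 spins for 30 seconds within an RCU
 * read-side critical section with preemption disabled, so with the usual
 * CONFIG_RCU_CPU_STALL_TIMEOUT of 21 seconds (configuration-dependent)
 * at least one RCU CPU stall warning should appear on the console while
 * rcu_torture_stall() is spinning.
 */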
/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		}
		n_barrier_successes++;
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
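/*
 * One barrier-test round, end to end: the driver above flips
 * barrier_phase and wakes the n_barrier_cbs helper kthreads, each of
 * which posts exactly one callback via cur_ops->call().  Once
 * barrier_cbs_count drops to zero, the driver invokes
 * cur_ops->cb_barrier(), which must not return until all of those
 * callbacks have been invoked; barrier_cbs_invoked != n_barrier_cbs at
 * that point therefore convicts the flavor's barrier implementation.
 */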
/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
			GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};
static void
rcu_torture_cleanup(void)
{
	int i;

	rcutorture_record_test_transition();
	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	for (i = 0; i < ncbflooders; i++)
		torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}

	/*
	 * Wait for all RCU callbacks to fire, then do flavor-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("rcutorture: duplicated callback was invoked.\n");
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&sched_ops, RCUTORTURE_TASKS_OPS
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kzalloc(nfakewriters *
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, NULL,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			firsterr = rcutorture_booster_init(i);
			if (firsterr)
				goto unwind;
		}
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	if (cbflood_n_burst > 0) {
		/* Create the cbflood threads */
		ncbflooders = (num_online_cpus() + 3) / 4;
		cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
				       GFP_KERNEL);
		if (!cbflood_task) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
		for (i = 0; i < ncbflooders; i++) {
			firsterr = torture_create_kthread(rcu_torture_cbflood,
							  NULL,
							  cbflood_task[i]);
			if (firsterr)
				goto unwind;
		}
	}
	rcutorture_record_test_transition();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);