2 * Module-based torture test facility for locking
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
18 * Copyright (C) IBM Corporation, 2014
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * Davidlohr Bueso <dave@stgolabs.net>
22 * Based on kernel/rcu/torture.c.
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/kthread.h>
27 #include <linux/sched/rt.h>
28 #include <linux/spinlock.h>
29 #include <linux/mutex.h>
30 #include <linux/rwsem.h>
31 #include <linux/smp.h>
32 #include <linux/interrupt.h>
33 #include <linux/sched.h>
34 #include <linux/atomic.h>
35 #include <linux/moduleparam.h>
36 #include <linux/delay.h>
37 #include <linux/slab.h>
38 #include <linux/percpu-rwsem.h>
39 #include <linux/torture.h>
41 MODULE_LICENSE("GPL");
42 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
44 torture_param(int, nwriters_stress, -1,
45 "Number of write-locking stress-test threads");
46 torture_param(int, nreaders_stress, -1,
47 "Number of read-locking stress-test threads");
48 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
49 torture_param(int, onoff_interval, 0,
50 "Time between CPU hotplugs (s), 0=disable");
51 torture_param(int, shuffle_interval, 3,
52 "Number of jiffies between shuffles, 0=disable");
53 torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
54 torture_param(int, stat_interval, 60,
55 "Number of seconds between stats printk()s");
56 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
57 torture_param(bool, verbose, true,
58 "Enable verbose debugging printk()s");
60 static char *torture_type = "spin_lock";
61 module_param(torture_type, charp, 0444);
62 MODULE_PARM_DESC(torture_type,
63 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
65 static struct task_struct *stats_task;
66 static struct task_struct **writer_tasks;
67 static struct task_struct **reader_tasks;
69 static bool lock_is_write_held;
70 static bool lock_is_read_held;
72 struct lock_stress_stats {
78 #define LOCKTORTURE_RUNNABLE_INIT 1
80 #define LOCKTORTURE_RUNNABLE_INIT 0
82 int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
83 module_param(torture_runnable, int, 0444);
84 MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
86 /* Forward reference. */
87 static void lock_torture_cleanup(void);
90 * Operations vector for selecting different types of tests.
92 struct lock_torture_ops {
94 int (*writelock)(void);
95 void (*write_delay)(struct torture_random_state *trsp);
96 void (*task_boost)(struct torture_random_state *trsp);
97 void (*writeunlock)(void);
98 int (*readlock)(void);
99 void (*read_delay)(struct torture_random_state *trsp);
100 void (*readunlock)(void);
102 unsigned long flags; /* for irq spinlocks */
106 struct lock_torture_cxt {
107 int nrealwriters_stress;
108 int nrealreaders_stress;
110 atomic_t n_lock_torture_errors;
111 struct lock_torture_ops *cur_ops;
112 struct lock_stress_stats *lwsa; /* writer statistics */
113 struct lock_stress_stats *lrsa; /* reader statistics */
115 static struct lock_torture_cxt cxt = { 0, 0, false,
119 * Definitions for lock torture testing.
122 static int torture_lock_busted_write_lock(void)
124 return 0; /* BUGGY, do not use in real life!!! */
127 static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
129 const unsigned long longdelay_ms = 100;
131 /* We want a long delay occasionally to force massive contention. */
132 if (!(torture_random(trsp) %
133 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
134 mdelay(longdelay_ms);
135 #ifdef CONFIG_PREEMPT
136 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
137 preempt_schedule(); /* Allow test to be preempted. */
141 static void torture_lock_busted_write_unlock(void)
143 /* BUGGY, do not use in real life!!! */
146 static void torture_boost_dummy(struct torture_random_state *trsp)
148 /* Only rtmutexes care about priority */
151 static struct lock_torture_ops lock_busted_ops = {
152 .writelock = torture_lock_busted_write_lock,
153 .write_delay = torture_lock_busted_write_delay,
154 .task_boost = torture_boost_dummy,
155 .writeunlock = torture_lock_busted_write_unlock,
159 .name = "lock_busted"
162 static DEFINE_SPINLOCK(torture_spinlock);
164 static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
166 spin_lock(&torture_spinlock);
170 static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
172 const unsigned long shortdelay_us = 2;
173 const unsigned long longdelay_ms = 100;
175 /* We want a short delay mostly to emulate likely code, and
176 * we want a long delay occasionally to force massive contention.
178 if (!(torture_random(trsp) %
179 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
180 mdelay(longdelay_ms);
181 if (!(torture_random(trsp) %
182 (cxt.nrealwriters_stress * 2 * shortdelay_us)))
183 udelay(shortdelay_us);
184 #ifdef CONFIG_PREEMPT
185 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
186 preempt_schedule(); /* Allow test to be preempted. */
190 static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
192 spin_unlock(&torture_spinlock);
195 static struct lock_torture_ops spin_lock_ops = {
196 .writelock = torture_spin_lock_write_lock,
197 .write_delay = torture_spin_lock_write_delay,
198 .task_boost = torture_boost_dummy,
199 .writeunlock = torture_spin_lock_write_unlock,
206 static int torture_spin_lock_write_lock_irq(void)
207 __acquires(torture_spinlock)
211 spin_lock_irqsave(&torture_spinlock, flags);
212 cxt.cur_ops->flags = flags;
216 static void torture_lock_spin_write_unlock_irq(void)
217 __releases(torture_spinlock)
219 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
222 static struct lock_torture_ops spin_lock_irq_ops = {
223 .writelock = torture_spin_lock_write_lock_irq,
224 .write_delay = torture_spin_lock_write_delay,
225 .task_boost = torture_boost_dummy,
226 .writeunlock = torture_lock_spin_write_unlock_irq,
230 .name = "spin_lock_irq"
233 static DEFINE_RWLOCK(torture_rwlock);
235 static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
237 write_lock(&torture_rwlock);
241 static void torture_rwlock_write_delay(struct torture_random_state *trsp)
243 const unsigned long shortdelay_us = 2;
244 const unsigned long longdelay_ms = 100;
246 /* We want a short delay mostly to emulate likely code, and
247 * we want a long delay occasionally to force massive contention.
249 if (!(torture_random(trsp) %
250 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
251 mdelay(longdelay_ms);
253 udelay(shortdelay_us);
256 static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
258 write_unlock(&torture_rwlock);
261 static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
263 read_lock(&torture_rwlock);
267 static void torture_rwlock_read_delay(struct torture_random_state *trsp)
269 const unsigned long shortdelay_us = 10;
270 const unsigned long longdelay_ms = 100;
272 /* We want a short delay mostly to emulate likely code, and
273 * we want a long delay occasionally to force massive contention.
275 if (!(torture_random(trsp) %
276 (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
277 mdelay(longdelay_ms);
279 udelay(shortdelay_us);
282 static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
284 read_unlock(&torture_rwlock);
287 static struct lock_torture_ops rw_lock_ops = {
288 .writelock = torture_rwlock_write_lock,
289 .write_delay = torture_rwlock_write_delay,
290 .task_boost = torture_boost_dummy,
291 .writeunlock = torture_rwlock_write_unlock,
292 .readlock = torture_rwlock_read_lock,
293 .read_delay = torture_rwlock_read_delay,
294 .readunlock = torture_rwlock_read_unlock,
298 static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
302 write_lock_irqsave(&torture_rwlock, flags);
303 cxt.cur_ops->flags = flags;
307 static void torture_rwlock_write_unlock_irq(void)
308 __releases(torture_rwlock)
310 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
313 static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
317 read_lock_irqsave(&torture_rwlock, flags);
318 cxt.cur_ops->flags = flags;
322 static void torture_rwlock_read_unlock_irq(void)
323 __releases(torture_rwlock)
325 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
328 static struct lock_torture_ops rw_lock_irq_ops = {
329 .writelock = torture_rwlock_write_lock_irq,
330 .write_delay = torture_rwlock_write_delay,
331 .task_boost = torture_boost_dummy,
332 .writeunlock = torture_rwlock_write_unlock_irq,
333 .readlock = torture_rwlock_read_lock_irq,
334 .read_delay = torture_rwlock_read_delay,
335 .readunlock = torture_rwlock_read_unlock_irq,
336 .name = "rw_lock_irq"
339 static DEFINE_MUTEX(torture_mutex);
341 static int torture_mutex_lock(void) __acquires(torture_mutex)
343 mutex_lock(&torture_mutex);
347 static void torture_mutex_delay(struct torture_random_state *trsp)
349 const unsigned long longdelay_ms = 100;
351 /* We want a long delay occasionally to force massive contention. */
352 if (!(torture_random(trsp) %
353 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
354 mdelay(longdelay_ms * 5);
356 mdelay(longdelay_ms / 5);
357 #ifdef CONFIG_PREEMPT
358 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
359 preempt_schedule(); /* Allow test to be preempted. */
363 static void torture_mutex_unlock(void) __releases(torture_mutex)
365 mutex_unlock(&torture_mutex);
368 static struct lock_torture_ops mutex_lock_ops = {
369 .writelock = torture_mutex_lock,
370 .write_delay = torture_mutex_delay,
371 .task_boost = torture_boost_dummy,
372 .writeunlock = torture_mutex_unlock,
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

/*
 * Randomly boost/deboost the current task's scheduling priority so that
 * the rtmutex priority-inheritance machinery gets exercised.  A nil @trsp
 * forces a reset back to SCHED_NORMAL (used when stopping the kthread).
 *
 * Fix: the sched_setscheduler_nocheck() call had a mojibake-corrupted
 * argument ("¶m"); it must pass the address of the local sched_param.
 */
static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * (1) Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex it will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (!(torture_random(trsp) %
		      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif
463 static DECLARE_RWSEM(torture_rwsem);
464 static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
466 down_write(&torture_rwsem);
470 static void torture_rwsem_write_delay(struct torture_random_state *trsp)
472 const unsigned long longdelay_ms = 100;
474 /* We want a long delay occasionally to force massive contention. */
475 if (!(torture_random(trsp) %
476 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
477 mdelay(longdelay_ms * 10);
479 mdelay(longdelay_ms / 10);
480 #ifdef CONFIG_PREEMPT
481 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
482 preempt_schedule(); /* Allow test to be preempted. */
486 static void torture_rwsem_up_write(void) __releases(torture_rwsem)
488 up_write(&torture_rwsem);
491 static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
493 down_read(&torture_rwsem);
497 static void torture_rwsem_read_delay(struct torture_random_state *trsp)
499 const unsigned long longdelay_ms = 100;
501 /* We want a long delay occasionally to force massive contention. */
502 if (!(torture_random(trsp) %
503 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
504 mdelay(longdelay_ms * 2);
506 mdelay(longdelay_ms / 2);
507 #ifdef CONFIG_PREEMPT
508 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
509 preempt_schedule(); /* Allow test to be preempted. */
513 static void torture_rwsem_up_read(void) __releases(torture_rwsem)
515 up_read(&torture_rwsem);
518 static struct lock_torture_ops rwsem_lock_ops = {
519 .writelock = torture_rwsem_down_write,
520 .write_delay = torture_rwsem_write_delay,
521 .task_boost = torture_boost_dummy,
522 .writeunlock = torture_rwsem_up_write,
523 .readlock = torture_rwsem_down_read,
524 .read_delay = torture_rwsem_read_delay,
525 .readunlock = torture_rwsem_up_read,
529 #include <linux/percpu-rwsem.h>
530 static struct percpu_rw_semaphore pcpu_rwsem;
532 void torture_percpu_rwsem_init(void)
534 BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
537 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
539 percpu_down_write(&pcpu_rwsem);
543 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
545 percpu_up_write(&pcpu_rwsem);
548 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
550 percpu_down_read(&pcpu_rwsem);
554 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
556 percpu_up_read(&pcpu_rwsem);
559 static struct lock_torture_ops percpu_rwsem_lock_ops = {
560 .init = torture_percpu_rwsem_init,
561 .writelock = torture_percpu_rwsem_down_write,
562 .write_delay = torture_rwsem_write_delay,
563 .task_boost = torture_boost_dummy,
564 .writeunlock = torture_percpu_rwsem_up_write,
565 .readlock = torture_percpu_rwsem_down_read,
566 .read_delay = torture_rwsem_read_delay,
567 .readunlock = torture_percpu_rwsem_up_read,
568 .name = "percpu_rwsem_lock"
572 * Lock torture writer kthread. Repeatedly acquires and releases
573 * the lock, checking for duplicate acquisitions.
575 static int lock_torture_writer(void *arg)
577 struct lock_stress_stats *lwsp = arg;
578 static DEFINE_TORTURE_RANDOM(rand);
580 VERBOSE_TOROUT_STRING("lock_torture_writer task started");
581 set_user_nice(current, MAX_NICE);
584 if ((torture_random(&rand) & 0xfffff) == 0)
585 schedule_timeout_uninterruptible(1);
587 cxt.cur_ops->task_boost(&rand);
588 cxt.cur_ops->writelock();
589 if (WARN_ON_ONCE(lock_is_write_held))
591 lock_is_write_held = 1;
592 if (WARN_ON_ONCE(lock_is_read_held))
593 lwsp->n_lock_fail++; /* rare, but... */
595 lwsp->n_lock_acquired++;
596 cxt.cur_ops->write_delay(&rand);
597 lock_is_write_held = 0;
598 cxt.cur_ops->writeunlock();
600 stutter_wait("lock_torture_writer");
601 } while (!torture_must_stop());
603 cxt.cur_ops->task_boost(NULL); /* reset prio */
604 torture_kthread_stopping("lock_torture_writer");
609 * Lock torture reader kthread. Repeatedly acquires and releases
612 static int lock_torture_reader(void *arg)
614 struct lock_stress_stats *lrsp = arg;
615 static DEFINE_TORTURE_RANDOM(rand);
617 VERBOSE_TOROUT_STRING("lock_torture_reader task started");
618 set_user_nice(current, MAX_NICE);
621 if ((torture_random(&rand) & 0xfffff) == 0)
622 schedule_timeout_uninterruptible(1);
624 cxt.cur_ops->readlock();
625 lock_is_read_held = 1;
626 if (WARN_ON_ONCE(lock_is_write_held))
627 lrsp->n_lock_fail++; /* rare, but... */
629 lrsp->n_lock_acquired++;
630 cxt.cur_ops->read_delay(&rand);
631 lock_is_read_held = 0;
632 cxt.cur_ops->readunlock();
634 stutter_wait("lock_torture_reader");
635 } while (!torture_must_stop());
636 torture_kthread_stopping("lock_torture_reader");
641 * Create an lock-torture-statistics message in the specified buffer.
643 static void __torture_print_stats(char *page,
644 struct lock_stress_stats *statp, bool write)
649 long min = statp[0].n_lock_acquired;
652 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
653 for (i = 0; i < n_stress; i++) {
654 if (statp[i].n_lock_fail)
656 sum += statp[i].n_lock_acquired;
657 if (max < statp[i].n_lock_fail)
658 max = statp[i].n_lock_fail;
659 if (min > statp[i].n_lock_fail)
660 min = statp[i].n_lock_fail;
662 page += sprintf(page,
663 "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
664 write ? "Writes" : "Reads ",
665 sum, max, min, max / 2 > min ? "???" : "",
666 fail, fail ? "!!!" : "");
668 atomic_inc(&cxt.n_lock_torture_errors);
672 * Print torture statistics. Caller must ensure that there is only one
673 * call to this function at a given time!!! This is normally accomplished
674 * by relying on the module system to only have one copy of the module
675 * loaded, and then by giving the lock_torture_stats kthread full control
676 * (or the init/cleanup functions when lock_torture_stats thread is not
679 static void lock_torture_stats_print(void)
681 int size = cxt.nrealwriters_stress * 200 + 8192;
684 if (cxt.cur_ops->readlock)
685 size += cxt.nrealreaders_stress * 200 + 8192;
687 buf = kmalloc(size, GFP_KERNEL);
689 pr_err("lock_torture_stats_print: Out of memory, need: %d",
694 __torture_print_stats(buf, cxt.lwsa, true);
698 if (cxt.cur_ops->readlock) {
699 buf = kmalloc(size, GFP_KERNEL);
701 pr_err("lock_torture_stats_print: Out of memory, need: %d",
706 __torture_print_stats(buf, cxt.lrsa, false);
713 * Periodically prints torture statistics, if periodic statistics printing
714 * was specified via the stat_interval module parameter.
716 * No need to worry about fullstop here, since this one doesn't reference
717 * volatile state or register callbacks.
719 static int lock_torture_stats(void *arg)
721 VERBOSE_TOROUT_STRING("lock_torture_stats task started");
723 schedule_timeout_interruptible(stat_interval * HZ);
724 lock_torture_stats_print();
725 torture_shutdown_absorb("lock_torture_stats");
726 } while (!torture_must_stop());
727 torture_kthread_stopping("lock_torture_stats");
732 lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
735 pr_alert("%s" TORTURE_FLAG
736 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
737 torture_type, tag, cxt.debug_lock ? " [debug]": "",
738 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
739 verbose, shuffle_interval, stutter, shutdown_secs,
740 onoff_interval, onoff_holdoff);
743 static void lock_torture_cleanup(void)
747 if (torture_cleanup_begin())
751 for (i = 0; i < cxt.nrealwriters_stress; i++)
752 torture_stop_kthread(lock_torture_writer,
759 for (i = 0; i < cxt.nrealreaders_stress; i++)
760 torture_stop_kthread(lock_torture_reader,
766 torture_stop_kthread(lock_torture_stats, stats_task);
767 lock_torture_stats_print(); /* -After- the stats thread is stopped! */
769 if (atomic_read(&cxt.n_lock_torture_errors))
770 lock_torture_print_module_parms(cxt.cur_ops,
771 "End of test: FAILURE");
772 else if (torture_onoff_failures())
773 lock_torture_print_module_parms(cxt.cur_ops,
774 "End of test: LOCK_HOTPLUG");
776 lock_torture_print_module_parms(cxt.cur_ops,
777 "End of test: SUCCESS");
778 torture_cleanup_end();
781 static int __init lock_torture_init(void)
785 static struct lock_torture_ops *torture_ops[] = {
787 &spin_lock_ops, &spin_lock_irq_ops,
788 &rw_lock_ops, &rw_lock_irq_ops,
790 #ifdef CONFIG_RT_MUTEXES
794 &percpu_rwsem_lock_ops,
797 if (!torture_init_begin(torture_type, verbose, &torture_runnable))
800 /* Process args and tell the world that the torturer is on the job. */
801 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
802 cxt.cur_ops = torture_ops[i];
803 if (strcmp(torture_type, cxt.cur_ops->name) == 0)
806 if (i == ARRAY_SIZE(torture_ops)) {
807 pr_alert("lock-torture: invalid torture type: \"%s\"\n",
809 pr_alert("lock-torture types:");
810 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
811 pr_alert(" %s", torture_ops[i]->name);
816 if (cxt.cur_ops->init)
819 if (nwriters_stress >= 0)
820 cxt.nrealwriters_stress = nwriters_stress;
822 cxt.nrealwriters_stress = 2 * num_online_cpus();
824 #ifdef CONFIG_DEBUG_MUTEXES
825 if (strncmp(torture_type, "mutex", 5) == 0)
826 cxt.debug_lock = true;
828 #ifdef CONFIG_DEBUG_RT_MUTEXES
829 if (strncmp(torture_type, "rtmutex", 7) == 0)
830 cxt.debug_lock = true;
832 #ifdef CONFIG_DEBUG_SPINLOCK
833 if ((strncmp(torture_type, "spin", 4) == 0) ||
834 (strncmp(torture_type, "rw_lock", 7) == 0))
835 cxt.debug_lock = true;
838 /* Initialize the statistics so that each run gets its own numbers. */
840 lock_is_write_held = 0;
841 cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
842 if (cxt.lwsa == NULL) {
843 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
847 for (i = 0; i < cxt.nrealwriters_stress; i++) {
848 cxt.lwsa[i].n_lock_fail = 0;
849 cxt.lwsa[i].n_lock_acquired = 0;
852 if (cxt.cur_ops->readlock) {
853 if (nreaders_stress >= 0)
854 cxt.nrealreaders_stress = nreaders_stress;
857 * By default distribute evenly the number of
858 * readers and writers. We still run the same number
859 * of threads as the writer-only locks default.
861 if (nwriters_stress < 0) /* user doesn't care */
862 cxt.nrealwriters_stress = num_online_cpus();
863 cxt.nrealreaders_stress = cxt.nrealwriters_stress;
866 lock_is_read_held = 0;
867 cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
868 if (cxt.lrsa == NULL) {
869 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
875 for (i = 0; i < cxt.nrealreaders_stress; i++) {
876 cxt.lrsa[i].n_lock_fail = 0;
877 cxt.lrsa[i].n_lock_acquired = 0;
880 lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
882 /* Prepare torture context. */
883 if (onoff_interval > 0) {
884 firsterr = torture_onoff_init(onoff_holdoff * HZ,
885 onoff_interval * HZ);
889 if (shuffle_interval > 0) {
890 firsterr = torture_shuffle_init(shuffle_interval);
894 if (shutdown_secs > 0) {
895 firsterr = torture_shutdown_init(shutdown_secs,
896 lock_torture_cleanup);
901 firsterr = torture_stutter_init(stutter);
906 writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
908 if (writer_tasks == NULL) {
909 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
914 if (cxt.cur_ops->readlock) {
915 reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
917 if (reader_tasks == NULL) {
918 VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
925 * Create the kthreads and start torturing (oh, those poor little locks).
927 * TODO: Note that we interleave writers with readers, giving writers a
928 * slight advantage, by creating its kthread first. This can be modified
929 * for very specific needs, or even let the user choose the policy, if
932 for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
933 j < cxt.nrealreaders_stress; i++, j++) {
934 if (i >= cxt.nrealwriters_stress)
938 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
944 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
947 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
952 if (stat_interval > 0) {
953 firsterr = torture_create_kthread(lock_torture_stats, NULL,
963 lock_torture_cleanup();
967 module_init(lock_torture_init);
968 module_exit(lock_torture_cleanup);