/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 *      Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
             "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
             "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
             "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
             "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
             "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
             "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
                 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
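
/*
 * Example invocation (parameter values are illustrative, not tuned
 * recommendations); when built as a module, the test starts at load
 * time because torture_runnable defaults to 1:
 *
 *     modprobe locktorture torture_type=rw_lock nwriters_stress=4 \
 *             nreaders_stress=4 stat_interval=30
 */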

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
        long n_lock_fail;
        long n_lock_acquired;
};

#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
        void (*init)(void);
        int (*writelock)(void);
        void (*write_delay)(struct torture_random_state *trsp);
        void (*writeunlock)(void);
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *trsp);
        void (*readunlock)(void);
        unsigned long flags;
        const char *name;
};
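
/*
 * A note on the ops contract, as used below: write-only lock types simply
 * leave readlock/read_delay/readunlock NULL, and the _irq variants stash
 * their saved interrupt state in ->flags between the lock and unlock calls.
 */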

struct lock_torture_cxt {
        int nrealwriters_stress;
        int nrealreaders_stress;
        bool debug_lock;
        atomic_t n_lock_torture_errors;
        struct lock_torture_ops *cur_ops;
        struct lock_stress_stats *lwsa; /* writer statistics */
        struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
                                       ATOMIC_INIT(0),
                                       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
        return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
        const unsigned long longdelay_us = 100;

        /* We want a long delay occasionally to force massive contention.  */
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2000 * longdelay_us)))
                mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
          /* BUGGY, do not use in real life!!! */
}

static struct lock_torture_ops lock_busted_ops = {
        .writelock      = torture_lock_busted_write_lock,
        .write_delay    = torture_lock_busted_write_delay,
        .writeunlock    = torture_lock_busted_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
        spin_lock(&torture_spinlock);
        return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 2;
        const unsigned long longdelay_us = 100;

        /* We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2000 * longdelay_us)))
                mdelay(longdelay_us);
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2 * shortdelay_us)))
                udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                preempt_schedule();  /* Allow test to be preempted. */
#endif
}
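
/*
 * Back-of-the-envelope check of the delay logic above, assuming (purely
 * for illustration) cxt.nrealwriters_stress == 8: the long 100us mdelay()
 * fires about once per 8 * 2000 * 100 = 1,600,000 calls, while the short
 * 2us udelay() fires about once per 8 * 2 * 2 = 32 calls, so short delays
 * dominate and long contention-forcing stalls stay rare.
 */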

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
        spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
        .writelock      = torture_spin_lock_write_lock,
        .write_delay    = torture_spin_lock_write_delay,
        .writeunlock    = torture_spin_lock_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
        unsigned long flags;

        spin_lock_irqsave(&torture_spinlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
        spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
        .writelock      = torture_spin_lock_write_lock_irq,
        .write_delay    = torture_spin_lock_write_delay,
        .writeunlock    = torture_lock_spin_write_unlock_irq,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
        write_lock(&torture_rwlock);
        return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 2;
        const unsigned long longdelay_ms = 100;

        /* We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
                mdelay(longdelay_ms);
        else
                udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
        write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
        read_lock(&torture_rwlock);
        return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 10;
        const unsigned long longdelay_ms = 100;

        /* We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (!(torture_random(trsp) %
              (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
                mdelay(longdelay_ms);
        else
                udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
        read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
        .writelock      = torture_rwlock_write_lock,
        .write_delay    = torture_rwlock_write_delay,
        .writeunlock    = torture_rwlock_write_unlock,
        .readlock       = torture_rwlock_read_lock,
        .read_delay     = torture_rwlock_read_delay,
        .readunlock     = torture_rwlock_read_unlock,
        .name           = "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
        unsigned long flags;

        write_lock_irqsave(&torture_rwlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
        write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
        unsigned long flags;

        read_lock_irqsave(&torture_rwlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
        /* Must be the read-side release; write_unlock here would deadlock. */
        read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
        .writelock      = torture_rwlock_write_lock_irq,
        .write_delay    = torture_rwlock_write_delay,
        .writeunlock    = torture_rwlock_write_unlock_irq,
        .readlock       = torture_rwlock_read_lock_irq,
        .read_delay     = torture_rwlock_read_delay,
        .readunlock     = torture_rwlock_read_unlock_irq,
        .name           = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
        mutex_lock(&torture_mutex);
        return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
        const unsigned long longdelay_ms = 100;

        /* We want a long delay occasionally to force massive contention.  */
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
                mdelay(longdelay_ms * 5);
        else
                mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
        mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
        .writelock      = torture_mutex_lock,
        .write_delay    = torture_mutex_delay,
        .writeunlock    = torture_mutex_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "mutex_lock"
};

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
        down_write(&torture_rwsem);
        return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
        const unsigned long longdelay_ms = 100;

        /* We want a long delay occasionally to force massive contention.  */
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
                mdelay(longdelay_ms * 10);
        else
                mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
        up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
        down_read(&torture_rwsem);
        return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
        const unsigned long longdelay_ms = 100;

        /* We want a long delay occasionally to force massive contention.  */
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
                mdelay(longdelay_ms * 2);
        else
                mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
        if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
                preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
        up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
        .writelock      = torture_rwsem_down_write,
        .write_delay    = torture_rwsem_write_delay,
        .writeunlock    = torture_rwsem_up_write,
        .readlock       = torture_rwsem_down_read,
        .read_delay     = torture_rwsem_read_delay,
        .readunlock     = torture_rwsem_up_read,
        .name           = "rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
        struct lock_stress_stats *lwsp = arg;
        static DEFINE_TORTURE_RANDOM(rand);

        VERBOSE_TOROUT_STRING("lock_torture_writer task started");
        set_user_nice(current, MAX_NICE);

        do {
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);

                cxt.cur_ops->writelock();
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
                lock_is_write_held = 1;
                if (WARN_ON_ONCE(lock_is_read_held))
                        lwsp->n_lock_fail++; /* rare, but... */

                lwsp->n_lock_acquired++;
                cxt.cur_ops->write_delay(&rand);
                lock_is_write_held = 0;
                cxt.cur_ops->writeunlock();

                stutter_wait("lock_torture_writer");
        } while (!torture_must_stop());
        torture_kthread_stopping("lock_torture_writer");
        return 0;
}
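
/*
 * Concretely: with torture_type=lock_busted, "writelock" never excludes
 * anyone, so two writers soon overlap, the second sees lock_is_write_held
 * already set, WARN_ON_ONCE() fires, and n_lock_fail is incremented. That
 * is the failure signal reported in the "Fail:" column of the stats line.
 */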

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
        struct lock_stress_stats *lrsp = arg;
        static DEFINE_TORTURE_RANDOM(rand);

        VERBOSE_TOROUT_STRING("lock_torture_reader task started");
        set_user_nice(current, MAX_NICE);

        do {
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);

                cxt.cur_ops->readlock();
                lock_is_read_held = 1;
                if (WARN_ON_ONCE(lock_is_write_held))
                        lrsp->n_lock_fail++; /* rare, but... */

                lrsp->n_lock_acquired++;
                cxt.cur_ops->read_delay(&rand);
                lock_is_read_held = 0;
                cxt.cur_ops->readunlock();

                stutter_wait("lock_torture_reader");
        } while (!torture_must_stop());
        torture_kthread_stopping("lock_torture_reader");
        return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
                                  struct lock_stress_stats *statp, bool write)
{
        bool fail = 0;
        int i, n_stress;
        long max = 0;
        long min = statp[0].n_lock_acquired;
        long long sum = 0;

        n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
        for (i = 0; i < n_stress; i++) {
                if (statp[i].n_lock_fail)
                        fail = true;
                sum += statp[i].n_lock_acquired;
                /* Max/Min track acquisitions, matching min's initializer. */
                if (max < statp[i].n_lock_acquired)
                        max = statp[i].n_lock_acquired;
                if (min > statp[i].n_lock_acquired)
                        min = statp[i].n_lock_acquired;
        }
        page += sprintf(page,
                        "%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
                        write ? "Writes" : "Reads ",
                        sum, max, min, max / 2 > min ? "???" : "",
                        fail, fail ? "!!!" : "");
        if (fail)
                atomic_inc(&cxt.n_lock_torture_errors);
}
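
/*
 * For reference, the line built above renders roughly as follows (numbers
 * invented for illustration; "???" flags a large per-thread acquisition
 * imbalance, "!!!" flags acquisition failures):
 *
 *     Writes:  Total: 93746064  Max/Min: 12284378/11386495   Fail: 0
 */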

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
        int size = cxt.nrealwriters_stress * 200 + 8192;
        char *buf;

        if (cxt.cur_ops->readlock)
                size += cxt.nrealreaders_stress * 200 + 8192;

        buf = kmalloc(size, GFP_KERNEL);
        if (!buf) {
                pr_err("lock_torture_stats_print: Out of memory, need: %d",
                       size);
                return;
        }

        __torture_print_stats(buf, cxt.lwsa, true);
        pr_alert("%s", buf);
        kfree(buf);

        if (cxt.cur_ops->readlock) {
                buf = kmalloc(size, GFP_KERNEL);
                if (!buf) {
                        pr_err("lock_torture_stats_print: Out of memory, need: %d",
                               size);
                        return;
                }

                __torture_print_stats(buf, cxt.lrsa, false);
                pr_alert("%s", buf);
                kfree(buf);
        }
}
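
/*
 * Sizing sanity check for the buffers above, assuming (for illustration)
 * 8 writer and 8 reader threads: size = 8 * 200 + 8192 + 8 * 200 + 8192 =
 * 19584 bytes, i.e. roughly 200 bytes per stress thread on top of 8KB of
 * fixed headroom per stats section.
 */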

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
        VERBOSE_TOROUT_STRING("lock_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                lock_torture_stats_print();
                torture_shutdown_absorb("lock_torture_stats");
        } while (!torture_must_stop());
        torture_kthread_stopping("lock_torture_stats");
        return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
                                const char *tag)
{
        pr_alert("%s" TORTURE_FLAG
                 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
                 torture_type, tag, cxt.debug_lock ? " [debug]": "",
                 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
                 verbose, shuffle_interval, stutter, shutdown_secs,
                 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
        int i;

        if (torture_cleanup_begin())
                return;

        /*
         * Indicates early cleanup, meaning that the test has not run,
         * such as when passing bogus args when loading the module. As
         * such, only perform the underlying torture-specific cleanups,
         * and avoid anything related to locktorture.
         */
        if (!cxt.lwsa)
                goto end;

        if (writer_tasks) {
                for (i = 0; i < cxt.nrealwriters_stress; i++)
                        torture_stop_kthread(lock_torture_writer,
                                             writer_tasks[i]);
                kfree(writer_tasks);
                writer_tasks = NULL;
        }

        if (reader_tasks) {
                for (i = 0; i < cxt.nrealreaders_stress; i++)
                        torture_stop_kthread(lock_torture_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
                reader_tasks = NULL;
        }

        torture_stop_kthread(lock_torture_stats, stats_task);
        lock_torture_stats_print();  /* -After- the stats thread is stopped! */

        if (atomic_read(&cxt.n_lock_torture_errors))
                lock_torture_print_module_parms(cxt.cur_ops,
                                                "End of test: FAILURE");
        else if (torture_onoff_failures())
                lock_torture_print_module_parms(cxt.cur_ops,
                                                "End of test: LOCK_HOTPLUG");
        else
                lock_torture_print_module_parms(cxt.cur_ops,
                                                "End of test: SUCCESS");
end:
        torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
        int i, j;
        int firsterr = 0;
        static struct lock_torture_ops *torture_ops[] = {
                &lock_busted_ops,
                &spin_lock_ops, &spin_lock_irq_ops,
                &rw_lock_ops, &rw_lock_irq_ops,
                &mutex_lock_ops,
                &rwsem_lock_ops,
        };

        if (!torture_init_begin(torture_type, verbose, &torture_runnable))
                return -EBUSY;

        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cxt.cur_ops = torture_ops[i];
                if (strcmp(torture_type, cxt.cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                pr_alert("lock-torture: invalid torture type: \"%s\"\n",
                         torture_type);
                pr_alert("lock-torture types:");
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        pr_alert(" %s", torture_ops[i]->name);
                pr_alert("\n");
                torture_init_end();
                return -EINVAL;
        }
        if (cxt.cur_ops->init)
                cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */

        if (nwriters_stress >= 0)
                cxt.nrealwriters_stress = nwriters_stress;
        else
                cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
        if (strncmp(torture_type, "mutex", 5) == 0)
                cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
        if ((strncmp(torture_type, "spin", 4) == 0) ||
            (strncmp(torture_type, "rw_lock", 7) == 0))
                cxt.debug_lock = true;
#endif

        /* Initialize the statistics so that each run gets its own numbers. */

        lock_is_write_held = 0;
        cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
        if (cxt.lwsa == NULL) {
                VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < cxt.nrealwriters_stress; i++) {
                cxt.lwsa[i].n_lock_fail = 0;
                cxt.lwsa[i].n_lock_acquired = 0;
        }

        if (cxt.cur_ops->readlock) {
                if (nreaders_stress >= 0)
                        cxt.nrealreaders_stress = nreaders_stress;
                else {
                        /*
                         * By default distribute evenly the number of
                         * readers and writers. We still run the same number
                         * of threads as the writer-only locks default.
                         */
                        if (nwriters_stress < 0) /* user doesn't care */
                                cxt.nrealwriters_stress = num_online_cpus();
                        cxt.nrealreaders_stress = cxt.nrealwriters_stress;
                }

                lock_is_read_held = 0;
                cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
                if (cxt.lrsa == NULL) {
                        VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
                        firsterr = -ENOMEM;
                        kfree(cxt.lwsa);
                        cxt.lwsa = NULL; /* So cleanup knows the test never ran. */
                        goto unwind;
                }

                for (i = 0; i < cxt.nrealreaders_stress; i++) {
                        cxt.lrsa[i].n_lock_fail = 0;
                        cxt.lrsa[i].n_lock_acquired = 0;
                }
        }
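
        /*
         * Worked example of the defaulting above, assuming (illustratively)
         * an 8-CPU machine with neither nwriters_stress nor nreaders_stress
         * specified: a write-only lock type gets 2 * 8 = 16 writers, while
         * a reader-capable type gets 8 writers plus 8 readers, keeping the
         * total thread count the same.
         */
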
        lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

        /* Prepare torture context. */
        if (onoff_interval > 0) {
                firsterr = torture_onoff_init(onoff_holdoff * HZ,
                                              onoff_interval * HZ);
                if (firsterr)
                        goto unwind;
        }
        if (shuffle_interval > 0) {
                firsterr = torture_shuffle_init(shuffle_interval);
                if (firsterr)
                        goto unwind;
        }
        if (shutdown_secs > 0) {
                firsterr = torture_shutdown_init(shutdown_secs,
                                                 lock_torture_cleanup);
                if (firsterr)
                        goto unwind;
        }
        if (stutter > 0) {
                firsterr = torture_stutter_init(stutter);
                if (firsterr)
                        goto unwind;
        }

        writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
                               GFP_KERNEL);
        if (writer_tasks == NULL) {
                VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }

        if (cxt.cur_ops->readlock) {
                reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
                                       GFP_KERNEL);
                if (reader_tasks == NULL) {
                        VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
                        firsterr = -ENOMEM;
                        goto unwind;
                }
        }

        /*
         * Create the kthreads and start torturing (oh, those poor little locks).
         *
         * TODO: Note that we interleave writers with readers, giving writers
         * a slight advantage, by creating their kthreads first. This can be
         * modified for very specific needs, or even let the user choose the
         * policy, if ever wanted.
         */
        for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
                    j < cxt.nrealreaders_stress; i++, j++) {
                if (i >= cxt.nrealwriters_stress)
                        goto create_reader;

                /* Create writer. */
                firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
                                                  writer_tasks[i]);
                if (firsterr)
                        goto unwind;

        create_reader:
                if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
                        continue;
                /* Create reader. */
                firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
                                                  reader_tasks[j]);
                if (firsterr)
                        goto unwind;
        }
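
        /*
         * For instance (hypothetical counts), 3 writers and 2 readers would
         * be created by the loop above in the order W0, R0, W1, R1, W2,
         * i.e. the writer-first interleaving the TODO comment describes.
         */
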
        if (stat_interval > 0) {
                firsterr = torture_create_kthread(lock_torture_stats, NULL,
                                                  stats_task);
                if (firsterr)
                        goto unwind;
        }
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        lock_torture_cleanup();
        return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);