These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / kernel / locking / locktorture.c
1 /*
2  * Module-based torture test facility for locking
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, you can access it online at
16  * http://www.gnu.org/licenses/gpl-2.0.html.
17  *
18  * Copyright (C) IBM Corporation, 2014
19  *
20  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21  *          Davidlohr Bueso <dave@stgolabs.net>
22  *      Based on kernel/rcu/torture.c.
23  */
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/kthread.h>
27 #include <linux/sched/rt.h>
28 #include <linux/spinlock.h>
29 #include <linux/mutex.h>
30 #include <linux/rwsem.h>
31 #include <linux/smp.h>
32 #include <linux/interrupt.h>
33 #include <linux/sched.h>
34 #include <linux/atomic.h>
35 #include <linux/moduleparam.h>
36 #include <linux/delay.h>
37 #include <linux/slab.h>
38 #include <linux/percpu-rwsem.h>
39 #include <linux/torture.h>
40
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

/*
 * Module parameters.  A thread count of -1 means "pick a default based
 * on the number of online CPUs" (resolved in lock_torture_init()).
 */
torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

/* Which lock flavor to torture; matched against lock_torture_ops::name. */
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

/* Kthreads created at init time and reaped in lock_torture_cleanup(). */
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

/*
 * Crude mutual-exclusion checks: a writer sets lock_is_write_held for
 * the duration of its critical section, readers likewise for the read
 * side.  Observing the "wrong" flag while holding the lock counts as a
 * lock failure (expected for the deliberately broken "lock_busted" type).
 */
static bool lock_is_write_held;
static bool lock_is_read_held;

/* Per-thread counters, aggregated by __torture_print_stats(). */
struct lock_stress_stats {
	long n_lock_fail;	/* exclusion violations observed */
	long n_lock_acquired;	/* successful acquisitions */
};

/* When built as a module, start torturing at insmod time by default. */
#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);
88
/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);	/* optional one-time setup, may be NULL */
	int (*writelock)(void);	/* acquire write side; returns 0 */
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);	/* NULL for write-only lock types */
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

/*
 * Global test context: thread counts actually in use, error tally, the
 * selected ops vector, and the per-thread statistics arrays.
 */
struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;	/* true when a lock-debugging config is on */
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

/*
 * Deliberately broken "lock": acquisition does nothing, so the
 * exclusion WARNs in the writer kthread are expected to fire.
 */
static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	  /* BUGGY, do not use in real life!!! */
}

/* No-op task_boost for lock types without priority inheritance. */
static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};
161
static DEFINE_SPINLOCK(torture_spinlock);

/* Acquire the torture spinlock; always succeeds, hence returns 0. */
static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

/*
 * Delay while holding the spinlock.  Both branches are probabilistic,
 * scaled by the writer count so the aggregate delay rate stays roughly
 * constant as threads are added.
 */
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};
205
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	/*
	 * Stash the saved irq flags in the ops vector for the unlock
	 * side; works only because acquisitions never nest here.
	 */
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};
232
static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

/* Write-side delay: rare long stall, otherwise a short busy-wait. */
static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

/* Read-side delay: like the write side, but scaled by the reader count. */
static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
297
static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	/* Saved irq flags parked in the ops vector; see unlock below. */
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	/*
	 * NOTE(review): readers and writers share the single
	 * cxt.cur_ops->flags slot; concurrent readers can clobber each
	 * other's saved flags.  Preserved as-is from the original.
	 */
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};
338
static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

/*
 * Delay while holding the mutex: occasionally 500ms, usually 20ms.
 * Sleeping-lock types can afford longer hold times than spinlocks.
 */
static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};
378
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

/*
 * Randomly flip the calling task between SCHED_FIFO (prio MAX_RT_PRIO-1)
 * and SCHED_NORMAL so the rtmutex priority-inheritance machinery gets
 * exercised.  A NULL @trsp forces a reset back to SCHED_NORMAL, used
 * when the writer kthread is stopping.
 */
static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * (1) Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (!(torture_random(trsp) %
		      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif
462
static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

/* Write-side hold delay: occasionally 1s, usually 10ms. */
static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}
496
497 static void torture_rwsem_read_delay(struct torture_random_state *trsp)
498 {
499         const unsigned long longdelay_ms = 100;
500
501         /* We want a long delay occasionally to force massive contention.  */
502         if (!(torture_random(trsp) %
503               (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
504                 mdelay(longdelay_ms * 2);
505         else
506                 mdelay(longdelay_ms / 2);
507 #ifdef CONFIG_PREEMPT
508         if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
509                 preempt_schedule();  /* Allow test to be preempted. */
510 #endif
511 }
512
static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
528
529 #include <linux/percpu-rwsem.h>
530 static struct percpu_rw_semaphore pcpu_rwsem;
531
532 void torture_percpu_rwsem_init(void)
533 {
534         BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
535 }
536
537 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
538 {
539         percpu_down_write(&pcpu_rwsem);
540         return 0;
541 }
542
543 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
544 {
545         percpu_up_write(&pcpu_rwsem);
546 }
547
548 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
549 {
550         percpu_down_read(&pcpu_rwsem);
551         return 0;
552 }
553
554 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
555 {
556         percpu_up_read(&pcpu_rwsem);
557 }
558
559 static struct lock_torture_ops percpu_rwsem_lock_ops = {
560         .init           = torture_percpu_rwsem_init,
561         .writelock      = torture_percpu_rwsem_down_write,
562         .write_delay    = torture_rwsem_write_delay,
563         .task_boost     = torture_boost_dummy,
564         .writeunlock    = torture_percpu_rwsem_up_write,
565         .readlock       = torture_percpu_rwsem_down_read,
566         .read_delay     = torture_rwsem_read_delay,
567         .readunlock     = torture_percpu_rwsem_up_read,
568         .name           = "percpu_rwsem_lock"
569 };
570
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;	/* this thread's counters */
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		/* Occasionally sleep a jiffy to vary the contention pattern. */
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		/*
		 * Another writer already inside the critical section means
		 * the lock failed to exclude us; expected to fire only for
		 * the deliberately broken "lock_busted" type.
		 */
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
607
/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.  Only created for lock types with a read side.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;	/* this thread's counters */
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		/* Occasionally sleep a jiffy to vary the contention pattern. */
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		/* A writer inside our read-side critical section is a bug. */
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}
639
640 /*
641  * Create an lock-torture-statistics message in the specified buffer.
642  */
643 static void __torture_print_stats(char *page,
644                                   struct lock_stress_stats *statp, bool write)
645 {
646         bool fail = 0;
647         int i, n_stress;
648         long max = 0;
649         long min = statp[0].n_lock_acquired;
650         long long sum = 0;
651
652         n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
653         for (i = 0; i < n_stress; i++) {
654                 if (statp[i].n_lock_fail)
655                         fail = true;
656                 sum += statp[i].n_lock_acquired;
657                 if (max < statp[i].n_lock_fail)
658                         max = statp[i].n_lock_fail;
659                 if (min > statp[i].n_lock_fail)
660                         min = statp[i].n_lock_fail;
661         }
662         page += sprintf(page,
663                         "%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
664                         write ? "Writes" : "Reads ",
665                         sum, max, min, max / 2 > min ? "???" : "",
666                         fail, fail ? "!!!" : "");
667         if (fail)
668                 atomic_inc(&cxt.n_lock_torture_errors);
669 }
670
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	/* Generous sizing: ~200 bytes per thread plus slack. */
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	/* Reader statistics exist only for lock types with a read side. */
	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}
711
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}
730
/*
 * Emit a one-line summary of the module parameters in effect, prefixed
 * with @tag ("Start of test", "End of test: SUCCESS", etc.).
 */
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}
742
/*
 * Stop all kthreads, print final statistics, and report the overall
 * verdict.  Safe to call more than once; the torture framework's
 * begin/end brackets make repeat calls no-ops.
 */
static void lock_torture_cleanup(void)
{
	int i;

	/* Bail if the torture framework says cleanup shouldn't proceed. */
	if (torture_cleanup_begin())
		return;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	/* Verdict: any recorded lock failure trumps hotplug failures. */
	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");
	torture_cleanup_end();
}
780
781 static int __init lock_torture_init(void)
782 {
783         int i, j;
784         int firsterr = 0;
785         static struct lock_torture_ops *torture_ops[] = {
786                 &lock_busted_ops,
787                 &spin_lock_ops, &spin_lock_irq_ops,
788                 &rw_lock_ops, &rw_lock_irq_ops,
789                 &mutex_lock_ops,
790 #ifdef CONFIG_RT_MUTEXES
791                 &rtmutex_lock_ops,
792 #endif
793                 &rwsem_lock_ops,
794                 &percpu_rwsem_lock_ops,
795         };
796
797         if (!torture_init_begin(torture_type, verbose, &torture_runnable))
798                 return -EBUSY;
799
800         /* Process args and tell the world that the torturer is on the job. */
801         for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
802                 cxt.cur_ops = torture_ops[i];
803                 if (strcmp(torture_type, cxt.cur_ops->name) == 0)
804                         break;
805         }
806         if (i == ARRAY_SIZE(torture_ops)) {
807                 pr_alert("lock-torture: invalid torture type: \"%s\"\n",
808                          torture_type);
809                 pr_alert("lock-torture types:");
810                 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
811                         pr_alert(" %s", torture_ops[i]->name);
812                 pr_alert("\n");
813                 firsterr = -EINVAL;
814                 goto unwind;
815         }
816         if (cxt.cur_ops->init)
817                 cxt.cur_ops->init();
818
819         if (nwriters_stress >= 0)
820                 cxt.nrealwriters_stress = nwriters_stress;
821         else
822                 cxt.nrealwriters_stress = 2 * num_online_cpus();
823
824 #ifdef CONFIG_DEBUG_MUTEXES
825         if (strncmp(torture_type, "mutex", 5) == 0)
826                 cxt.debug_lock = true;
827 #endif
828 #ifdef CONFIG_DEBUG_RT_MUTEXES
829         if (strncmp(torture_type, "rtmutex", 7) == 0)
830                 cxt.debug_lock = true;
831 #endif
832 #ifdef CONFIG_DEBUG_SPINLOCK
833         if ((strncmp(torture_type, "spin", 4) == 0) ||
834             (strncmp(torture_type, "rw_lock", 7) == 0))
835                 cxt.debug_lock = true;
836 #endif
837
838         /* Initialize the statistics so that each run gets its own numbers. */
839
840         lock_is_write_held = 0;
841         cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
842         if (cxt.lwsa == NULL) {
843                 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
844                 firsterr = -ENOMEM;
845                 goto unwind;
846         }
847         for (i = 0; i < cxt.nrealwriters_stress; i++) {
848                 cxt.lwsa[i].n_lock_fail = 0;
849                 cxt.lwsa[i].n_lock_acquired = 0;
850         }
851
852         if (cxt.cur_ops->readlock) {
853                 if (nreaders_stress >= 0)
854                         cxt.nrealreaders_stress = nreaders_stress;
855                 else {
856                         /*
857                          * By default distribute evenly the number of
858                          * readers and writers. We still run the same number
859                          * of threads as the writer-only locks default.
860                          */
861                         if (nwriters_stress < 0) /* user doesn't care */
862                                 cxt.nrealwriters_stress = num_online_cpus();
863                         cxt.nrealreaders_stress = cxt.nrealwriters_stress;
864                 }
865
866                 lock_is_read_held = 0;
867                 cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
868                 if (cxt.lrsa == NULL) {
869                         VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
870                         firsterr = -ENOMEM;
871                         kfree(cxt.lwsa);
872                         goto unwind;
873                 }
874
875                 for (i = 0; i < cxt.nrealreaders_stress; i++) {
876                         cxt.lrsa[i].n_lock_fail = 0;
877                         cxt.lrsa[i].n_lock_acquired = 0;
878                 }
879         }
880         lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
881
882         /* Prepare torture context. */
883         if (onoff_interval > 0) {
884                 firsterr = torture_onoff_init(onoff_holdoff * HZ,
885                                               onoff_interval * HZ);
886                 if (firsterr)
887                         goto unwind;
888         }
889         if (shuffle_interval > 0) {
890                 firsterr = torture_shuffle_init(shuffle_interval);
891                 if (firsterr)
892                         goto unwind;
893         }
894         if (shutdown_secs > 0) {
895                 firsterr = torture_shutdown_init(shutdown_secs,
896                                                  lock_torture_cleanup);
897                 if (firsterr)
898                         goto unwind;
899         }
900         if (stutter > 0) {
901                 firsterr = torture_stutter_init(stutter);
902                 if (firsterr)
903                         goto unwind;
904         }
905
906         writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
907                                GFP_KERNEL);
908         if (writer_tasks == NULL) {
909                 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
910                 firsterr = -ENOMEM;
911                 goto unwind;
912         }
913
914         if (cxt.cur_ops->readlock) {
915                 reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
916                                        GFP_KERNEL);
917                 if (reader_tasks == NULL) {
918                         VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
919                         firsterr = -ENOMEM;
920                         goto unwind;
921                 }
922         }
923
924         /*
925          * Create the kthreads and start torturing (oh, those poor little locks).
926          *
927          * TODO: Note that we interleave writers with readers, giving writers a
928          * slight advantage, by creating its kthread first. This can be modified
929          * for very specific needs, or even let the user choose the policy, if
930          * ever wanted.
931          */
932         for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
933                     j < cxt.nrealreaders_stress; i++, j++) {
934                 if (i >= cxt.nrealwriters_stress)
935                         goto create_reader;
936
937                 /* Create writer. */
938                 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
939                                                   writer_tasks[i]);
940                 if (firsterr)
941                         goto unwind;
942
943         create_reader:
944                 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
945                         continue;
946                 /* Create reader. */
947                 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
948                                                   reader_tasks[j]);
949                 if (firsterr)
950                         goto unwind;
951         }
952         if (stat_interval > 0) {
953                 firsterr = torture_create_kthread(lock_torture_stats, NULL,
954                                                   stats_task);
955                 if (firsterr)
956                         goto unwind;
957         }
958         torture_init_end();
959         return 0;
960
961 unwind:
962         torture_init_end();
963         lock_torture_cleanup();
964         return firsterr;
965 }
966
/* Module entry point: lock_torture_init() registers the torture context and
 * spawns the writer/reader/stats kthreads (see unwind path above for cleanup
 * on failure). */
967 module_init(lock_torture_init);
/* Module exit: lock_torture_cleanup() — also registered via
 * torture_shutdown_init() above — stops the kthreads and prints final stats. */
968 module_exit(lock_torture_cleanup);