/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT	0
#define SOFT_WATCHDOG_ENABLED_BIT	1
#define NMI_WATCHDOG_ENABLED		(1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED		(1 << SOFT_WATCHDOG_ENABLED_BIT)
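
/*
 * For illustration (values implied by the bit definitions above): the
 * 'watchdog' parameter toggles both bits at once, so the possible states
 * of 'watchdog_enabled' are:
 *
 *	0x0 - both detectors disabled
 *	0x1 - only the hard lockup detector enabled
 *	0x2 - only the soft lockup detector enabled
 *	0x3 - both detectors enabled
 */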

static DEFINE_MUTEX(watchdog_proc_mutex);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
static unsigned long hardlockup_allcpu_dumped;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
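
/*
 * Illustrative boot command line usage, matching the values parsed above:
 *
 *	nmi_watchdog=panic	- panic when a hard lockup is detected
 *	nmi_watchdog=nopanic	- warn only
 *	nmi_watchdog=0		- disable the hard lockup detector
 *	nmi_watchdog=1		- enable the hard lockup detector
 */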

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns == 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
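
/*
 * Worked example with the default watchdog_thresh of 10 seconds:
 * get_softlockup_thresh() returns 20, so sample_period becomes
 * 20 * (10^9 / 5) ns = 4 seconds. The hrtimer therefore fires roughly
 * five times within the 20 second soft lockup window.
 */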

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done lockless.
	 * Do we care if a 0 races with a timestamp?
	 * All it means is the softlockup check starts one cycle later.
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif
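
/*
 * Illustrative (hypothetical) caller: a long-running polling loop that
 * legitimately holds the CPU should kick the watchdog periodically, e.g.
 *
 *	while (!poll_hardware_done()) {		(hypothetical helper)
 *		cpu_relax();
 *		touch_nmi_watchdog();
 *	}
 *
 * touch_nmi_watchdog() also touches the softlockup watchdog, so one call
 * quiets both detectors for the current sample window.
 */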

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
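
/*
 * Worked example: with the default threshold, get_softlockup_thresh()
 * is 20. If the last touch was at touch_ts = 100 s and now = 125 s, the
 * delay exceeds the 20 s window and is_softlockup() returns 25, the
 * number of seconds the CPU appears stuck; otherwise it returns 0.
 */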

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static DEFINE_RAW_SPINLOCK(watchdog_output_lock);

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();
		struct pt_regs *regs = get_irq_regs();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		/*
		 * If early-printk is enabled then make sure we do not
		 * lock up in printk() and kill console logging:
		 */
		printk_kill();

		raw_spin_lock(&watchdog_output_lock);

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform all-CPU dump only once to avoid multiple hardlockups
		 * generating interleaving traces
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		raw_spin_unlock(&watchdog_output_lock);
		if (hardlockup_panic)
			panic("Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer->irqsafe = 1;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}
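
/*
 * A sketch of the per-CPU handshake between the hrtimer and this thread:
 *
 *	watchdog_timer_fn()	increments hrtimer_interrupts and wakes
 *				the watchdog thread
 *	watchdog_should_run()	returns true while hrtimer_interrupts !=
 *				soft_lockup_hrtimer_cnt
 *	watchdog()		copies the counter and touches the
 *				timestamp via __touch_watchdog()
 *
 * If this thread cannot run (e.g. a task hogs the CPU at higher priority
 * or with preemption disabled), the timestamp goes stale and
 * watchdog_timer_fn() reports a soft lockup via is_softlockup().
 */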

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * the hardware perf event. The watchdog() function checks
	 * the NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}

#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}
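
/*
 * Illustrative usage sketch for the suspend/resume pair (hypothetical
 * caller): a subsystem that must not trip the detectors can bracket its
 * critical work like so:
 *
 *	if (!lockup_detector_suspend()) {
 *		... work that may monopolize CPUs ...
 *		lockup_detector_resume();
 *	}
 *
 * Calls may nest; the threads are only unparked once the last active
 * suspend request has been resumed.
 */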

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameters
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
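
/*
 * Illustrative shell usage of the three parameters handled above
 * (run as root):
 *
 *	echo 0 > /proc/sys/kernel/watchdog	# disable both detectors
 *	echo 1 > /proc/sys/kernel/nmi_watchdog	# hard lockup detector only
 *	cat /proc/sys/kernel/soft_watchdog	# read back a state bit
 */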

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (smpboot_update_cpumask_percpu_thread(
				    &watchdog_threads, &watchdog_cpumask) != 0)
				pr_err("cpumask update failed\n");
		}
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
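
/*
 * Illustrative usage: proc_do_large_bitmap() accepts cpu-list syntax, so
 * restricting the watchdog to CPUs 0-3 could look like (run as root):
 *
 *	echo 0-3 > /proc/sys/kernel/watchdog_cpumask
 */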

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}