4 * Kernel internal timers
6 * Copyright (C) 1991, 1992 Linus Torvalds
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
22 #include <linux/kernel_stat.h>
23 #include <linux/export.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
28 #include <linux/swap.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/notifier.h>
31 #include <linux/thread_info.h>
32 #include <linux/time.h>
33 #include <linux/jiffies.h>
34 #include <linux/posix-timers.h>
35 #include <linux/cpu.h>
36 #include <linux/syscalls.h>
37 #include <linux/delay.h>
38 #include <linux/tick.h>
39 #include <linux/kallsyms.h>
40 #include <linux/irq_work.h>
41 #include <linux/sched.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/slab.h>
44 #include <linux/compat.h>
46 #include <asm/uaccess.h>
47 #include <asm/unistd.h>
48 #include <asm/div64.h>
49 #include <asm/timex.h>
52 #include "tick-internal.h"
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/timer.h>
57 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
59 EXPORT_SYMBOL(jiffies_64);
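/*
 * Illustrative sketch (hypothetical helpers, not part of this file): jiffies
 * wraps, so deadlines are best compared with time_after()/time_before()
 * rather than plain '<'/'>', and the full 64-bit counter exported above is
 * read via get_jiffies_64().
 */
static inline bool example_deadline_passed(unsigned long deadline)
{
	return time_after(jiffies, deadline);	/* wrap-safe comparison */
}

static inline u64 example_uptime_jiffies(void)
{
	return get_jiffies_64();	/* safe read of the full 64-bit counter */
}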
62 * per-CPU timer vector definitions:
64 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
65 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
66 #define TVN_SIZE (1 << TVN_BITS)
67 #define TVR_SIZE (1 << TVR_BITS)
68 #define TVN_MASK (TVN_SIZE - 1)
69 #define TVR_MASK (TVR_SIZE - 1)
70 #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
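/*
 * Illustrative sketch (hypothetical helper, not used by this file): with the
 * default TVR_BITS=8/TVN_BITS=6 the wheel levels cover expiry deltas of
 *   tv1: [0, 2^8), tv2: [2^8, 2^14), tv3: [2^14, 2^20),
 *   tv4: [2^20, 2^26), tv5: [2^26, MAX_TVAL]
 * measured in jiffies from base->timer_jiffies. __internal_add_timer() below
 * performs the same classification when it picks a vector.
 */
static inline int example_tvec_level(unsigned long delta)
{
	if (delta < TVR_SIZE)
		return 1;
	if (delta < 1UL << (TVR_BITS + TVN_BITS))
		return 2;
	if (delta < 1UL << (TVR_BITS + 2 * TVN_BITS))
		return 3;
	if (delta < 1UL << (TVR_BITS + 3 * TVN_BITS))
		return 4;
	return 5;	/* capped at MAX_TVAL by __internal_add_timer() */
}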
73 struct hlist_head vec[TVN_SIZE];
77 struct hlist_head vec[TVR_SIZE];
82 struct timer_list *running_timer;
83 #ifdef CONFIG_PREEMPT_RT_FULL
84 wait_queue_head_t wait_for_running_timer;
86 unsigned long timer_jiffies;
87 unsigned long next_timer;
88 unsigned long active_timers;
89 unsigned long all_timers;
91 bool migration_enabled;
98 } ____cacheline_aligned;
101 static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
103 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
104 unsigned int sysctl_timer_migration = 1;
106 void timers_update_migration(bool update_nohz)
108 bool on = sysctl_timer_migration && tick_nohz_active;
111 /* Avoid the loop, if nothing to update */
112 if (this_cpu_read(tvec_bases.migration_enabled) == on)
115 for_each_possible_cpu(cpu) {
116 per_cpu(tvec_bases.migration_enabled, cpu) = on;
117 per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
120 per_cpu(tvec_bases.nohz_active, cpu) = true;
121 per_cpu(hrtimer_bases.nohz_active, cpu) = true;
125 int timer_migration_handler(struct ctl_table *table, int write,
126 void __user *buffer, size_t *lenp,
129 static DEFINE_MUTEX(mutex);
133 ret = proc_dointvec(table, write, buffer, lenp, ppos);
135 timers_update_migration(false);
136 mutex_unlock(&mutex);
140 static inline struct tvec_base *get_target_base(struct tvec_base *base,
143 if (pinned || !base->migration_enabled)
144 return this_cpu_ptr(&tvec_bases);
145 return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
148 static inline struct tvec_base *get_target_base(struct tvec_base *base,
151 return this_cpu_ptr(&tvec_bases);
155 static unsigned long round_jiffies_common(unsigned long j, int cpu,
159 unsigned long original = j;
162 * We don't want all cpus firing their timers at once hitting the
163 * same lock or cachelines, so we skew each extra cpu with an extra
164 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
166 * The skew is done by adding 3*cpunr, then round, then subtract this
167 * extra offset again.
174 * If the target jiffie is just after a whole second (which can happen
175 * due to delays of the timer irq, long irq off times, etc.) then

176 * we should round down to the whole second, not up. Use 1/4th second
177 * as cutoff for this rounding as an extreme upper bound for this.
178 * But never round down if @force_up is set.
180 if (rem < HZ/4 && !force_up) /* round down */
185 /* now that we have rounded, subtract the extra skew again */
189 * Make sure j is still in the future. Otherwise return the
192 return time_is_after_jiffies(j) ? j : original;
196 * __round_jiffies - function to round jiffies to a full second
197 * @j: the time in (absolute) jiffies that should be rounded
198 * @cpu: the processor number on which the timeout will happen
200 * __round_jiffies() rounds an absolute time in the future (in jiffies)
201 * up or down to (approximately) full seconds. This is useful for timers
202 * for which the exact time they fire does not matter too much, as long as
203 * they fire approximately every X seconds.
205 * By rounding these timers to whole seconds, all such timers will fire
206 * at the same time, rather than at various times spread out. The goal
207 * of this is to have the CPU wake up less, which saves power.
209 * The exact rounding is skewed for each processor to avoid all
210 * processors firing at the exact same time, which could lead
211 * to lock contention or spurious cache line bouncing.
213 * The return value is the rounded version of the @j parameter.
215 unsigned long __round_jiffies(unsigned long j, int cpu)
217 return round_jiffies_common(j, cpu, false);
219 EXPORT_SYMBOL_GPL(__round_jiffies);
222 * __round_jiffies_relative - function to round jiffies to a full second
223 * @j: the time in (relative) jiffies that should be rounded
224 * @cpu: the processor number on which the timeout will happen
226 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
227 * up or down to (approximately) full seconds. This is useful for timers
228 * for which the exact time they fire does not matter too much, as long as
229 * they fire approximately every X seconds.
231 * By rounding these timers to whole seconds, all such timers will fire
232 * at the same time, rather than at various times spread out. The goal
233 * of this is to have the CPU wake up less, which saves power.
235 * The exact rounding is skewed for each processor to avoid all
236 * processors firing at the exact same time, which could lead
237 * to lock contention or spurious cache line bouncing.
239 * The return value is the rounded version of the @j parameter.
241 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
243 unsigned long j0 = jiffies;
245 /* Use j0 because jiffies might change while we run */
246 return round_jiffies_common(j + j0, cpu, false) - j0;
248 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
251 * round_jiffies - function to round jiffies to a full second
252 * @j: the time in (absolute) jiffies that should be rounded
254 * round_jiffies() rounds an absolute time in the future (in jiffies)
255 * up or down to (approximately) full seconds. This is useful for timers
256 * for which the exact time they fire does not matter too much, as long as
257 * they fire approximately every X seconds.
259 * By rounding these timers to whole seconds, all such timers will fire
260 * at the same time, rather than at various times spread out. The goal
261 * of this is to have the CPU wake up less, which saves power.
263 * The return value is the rounded version of the @j parameter.
265 unsigned long round_jiffies(unsigned long j)
267 return round_jiffies_common(j, raw_smp_processor_id(), false);
269 EXPORT_SYMBOL_GPL(round_jiffies);
272 * round_jiffies_relative - function to round jiffies to a full second
273 * @j: the time in (relative) jiffies that should be rounded
275 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
276 * up or down to (approximately) full seconds. This is useful for timers
277 * for which the exact time they fire does not matter too much, as long as
278 * they fire approximately every X seconds.
280 * By rounding these timers to whole seconds, all such timers will fire
281 * at the same time, rather than at various times spread out. The goal
282 * of this is to have the CPU wake up less, which saves power.
284 * The return value is the rounded version of the @j parameter.
286 unsigned long round_jiffies_relative(unsigned long j)
288 return __round_jiffies_relative(j, raw_smp_processor_id());
290 EXPORT_SYMBOL_GPL(round_jiffies_relative);
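/*
 * Illustrative usage sketch (hypothetical caller, not part of this file): a
 * driver that only needs to run housekeeping "about once a second" can round
 * its timeout so that it expires together with other such timers and lets an
 * idle CPU sleep longer.
 */
static void example_rearm_housekeeping(struct timer_list *t)
{
	/* fire roughly one second from now, aligned to a full second */
	mod_timer(t, round_jiffies(jiffies + HZ));
}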
293 * __round_jiffies_up - function to round jiffies up to a full second
294 * @j: the time in (absolute) jiffies that should be rounded
295 * @cpu: the processor number on which the timeout will happen
297 * This is the same as __round_jiffies() except that it will never
298 * round down. This is useful for timeouts for which the exact time
299 * of firing does not matter too much, as long as they don't fire too
302 unsigned long __round_jiffies_up(unsigned long j, int cpu)
304 return round_jiffies_common(j, cpu, true);
306 EXPORT_SYMBOL_GPL(__round_jiffies_up);
309 * __round_jiffies_up_relative - function to round jiffies up to a full second
310 * @j: the time in (relative) jiffies that should be rounded
311 * @cpu: the processor number on which the timeout will happen
313 * This is the same as __round_jiffies_relative() except that it will never
314 * round down. This is useful for timeouts for which the exact time
315 * of firing does not matter too much, as long as they don't fire too
318 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
320 unsigned long j0 = jiffies;
322 /* Use j0 because jiffies might change while we run */
323 return round_jiffies_common(j + j0, cpu, true) - j0;
325 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
328 * round_jiffies_up - function to round jiffies up to a full second
329 * @j: the time in (absolute) jiffies that should be rounded
331 * This is the same as round_jiffies() except that it will never
332 * round down. This is useful for timeouts for which the exact time
333 * of firing does not matter too much, as long as they don't fire too
336 unsigned long round_jiffies_up(unsigned long j)
338 return round_jiffies_common(j, raw_smp_processor_id(), true);
340 EXPORT_SYMBOL_GPL(round_jiffies_up);
343 * round_jiffies_up_relative - function to round jiffies up to a full second
344 * @j: the time in (relative) jiffies that should be rounded
346 * This is the same as round_jiffies_relative() except that it will never
347 * round down. This is useful for timeouts for which the exact time
348 * of firing does not matter too much, as long as they don't fire too
351 unsigned long round_jiffies_up_relative(unsigned long j)
353 return __round_jiffies_up_relative(j, raw_smp_processor_id());
355 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
358 * set_timer_slack - set the allowed slack for a timer
359 * @timer: the timer to be modified
360 * @slack_hz: the amount of time (in jiffies) allowed for rounding
362 * Set the amount of time, in jiffies, that a certain timer has
363 * in terms of slack. By setting this value, the timer subsystem
364 * will schedule the actual timer somewhere between
365 * the time mod_timer() asks for, and that time plus the slack.
367 * By setting the slack to -1, a percentage of the delay is used
370 void set_timer_slack(struct timer_list *timer, int slack_hz)
372 timer->slack = slack_hz;
374 EXPORT_SYMBOL_GPL(set_timer_slack);
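/*
 * Illustrative usage sketch (hypothetical caller): a timeout that only needs
 * to fire "within a few hundred milliseconds" can advertise that tolerance,
 * allowing the wheel to batch it with neighbouring timers.
 */
static void example_arm_with_slack(struct timer_list *t)
{
	set_timer_slack(t, HZ / 4);	/* allow up to ~250ms of rounding */
	mod_timer(t, jiffies + 2 * HZ);
}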
377 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
379 unsigned long expires = timer->expires;
380 unsigned long idx = expires - base->timer_jiffies;
381 struct hlist_head *vec;
383 if (idx < TVR_SIZE) {
384 int i = expires & TVR_MASK;
385 vec = base->tv1.vec + i;
386 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
387 int i = (expires >> TVR_BITS) & TVN_MASK;
388 vec = base->tv2.vec + i;
389 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
390 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
391 vec = base->tv3.vec + i;
392 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
393 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
394 vec = base->tv4.vec + i;
395 } else if ((signed long) idx < 0) {
397 * Can happen if you add a timer with expires == jiffies,
398 * or you set a timer to go off in the past
400 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
403 /* If the timeout is larger than MAX_TVAL (on 64-bit
404 * architectures or with CONFIG_BASE_SMALL=1) then we
405 * use the maximum timeout.
407 if (idx > MAX_TVAL) {
409 expires = idx + base->timer_jiffies;
411 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
412 vec = base->tv5.vec + i;
415 hlist_add_head(&timer->entry, vec);
418 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
420 /* Advance base->timer_jiffies, if the base is empty */
421 if (!base->all_timers++)
422 base->timer_jiffies = jiffies;
424 __internal_add_timer(base, timer);
426 * Update base->active_timers and base->next_timer
428 if (!(timer->flags & TIMER_DEFERRABLE)) {
429 if (!base->active_timers++ ||
430 time_before(timer->expires, base->next_timer))
431 base->next_timer = timer->expires;
435 * Check whether the other CPU is in dynticks mode and needs
436 * to be triggered to reevaluate the timer wheel.
437 * We are protected against the other CPU fiddling
438 * with the timer by holding the timer base lock. This also
439 * makes sure that a CPU on the way to stop its tick can not
440 * evaluate the timer wheel.
442 * Spare the IPI for deferrable timers on idle targets though.
443 * The next busy ticks will take care of it. Except full dynticks
444 require special care against races with idle_cpu(), let's deal
447 if (base->nohz_active) {
448 if (!(timer->flags & TIMER_DEFERRABLE) ||
449 tick_nohz_full_cpu(base->cpu))
450 wake_up_nohz_cpu(base->cpu);
454 #ifdef CONFIG_TIMER_STATS
455 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
457 if (timer->start_site)
460 timer->start_site = addr;
461 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
462 timer->start_pid = current->pid;
465 static void timer_stats_account_timer(struct timer_list *timer)
470 * start_site can be concurrently reset by
471 * timer_stats_timer_clear_start_info()
473 site = READ_ONCE(timer->start_site);
477 timer_stats_update_stats(timer, timer->start_pid, site,
478 timer->function, timer->start_comm,
483 static void timer_stats_account_timer(struct timer_list *timer) {}
486 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
488 static struct debug_obj_descr timer_debug_descr;
490 static void *timer_debug_hint(void *addr)
492 return ((struct timer_list *) addr)->function;
496 * fixup_init is called when:
497 * - an active object is initialized
499 static int timer_fixup_init(void *addr, enum debug_obj_state state)
501 struct timer_list *timer = addr;
504 case ODEBUG_STATE_ACTIVE:
505 del_timer_sync(timer);
506 debug_object_init(timer, &timer_debug_descr);
513 /* Stub timer callback for improperly used timers. */
514 static void stub_timer(unsigned long data)
520 * fixup_activate is called when:
521 * - an active object is activated
522 * - an unknown object is activated (might be a statically initialized object)
524 static int timer_fixup_activate(void *addr, enum debug_obj_state state)
526 struct timer_list *timer = addr;
530 case ODEBUG_STATE_NOTAVAILABLE:
532 * This is not really a fixup. The timer was
533 * statically initialized. We just make sure that it
534 * is tracked in the object tracker.
536 if (timer->entry.pprev == NULL &&
537 timer->entry.next == TIMER_ENTRY_STATIC) {
538 debug_object_init(timer, &timer_debug_descr);
539 debug_object_activate(timer, &timer_debug_descr);
542 setup_timer(timer, stub_timer, 0);
547 case ODEBUG_STATE_ACTIVE:
556 * fixup_free is called when:
557 * - an active object is freed
559 static int timer_fixup_free(void *addr, enum debug_obj_state state)
561 struct timer_list *timer = addr;
564 case ODEBUG_STATE_ACTIVE:
565 del_timer_sync(timer);
566 debug_object_free(timer, &timer_debug_descr);
574 * fixup_assert_init is called when:
575 * - an untracked/uninit-ed object is found
577 static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
579 struct timer_list *timer = addr;
582 case ODEBUG_STATE_NOTAVAILABLE:
583 if (timer->entry.next == TIMER_ENTRY_STATIC) {
585 * This is not really a fixup. The timer was
586 * statically initialized. We just make sure that it
587 * is tracked in the object tracker.
589 debug_object_init(timer, &timer_debug_descr);
592 setup_timer(timer, stub_timer, 0);
600 static struct debug_obj_descr timer_debug_descr = {
601 .name = "timer_list",
602 .debug_hint = timer_debug_hint,
603 .fixup_init = timer_fixup_init,
604 .fixup_activate = timer_fixup_activate,
605 .fixup_free = timer_fixup_free,
606 .fixup_assert_init = timer_fixup_assert_init,
609 static inline void debug_timer_init(struct timer_list *timer)
611 debug_object_init(timer, &timer_debug_descr);
614 static inline void debug_timer_activate(struct timer_list *timer)
616 debug_object_activate(timer, &timer_debug_descr);
619 static inline void debug_timer_deactivate(struct timer_list *timer)
621 debug_object_deactivate(timer, &timer_debug_descr);
624 static inline void debug_timer_free(struct timer_list *timer)
626 debug_object_free(timer, &timer_debug_descr);
629 static inline void debug_timer_assert_init(struct timer_list *timer)
631 debug_object_assert_init(timer, &timer_debug_descr);
634 static void do_init_timer(struct timer_list *timer, unsigned int flags,
635 const char *name, struct lock_class_key *key);
637 void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
638 const char *name, struct lock_class_key *key)
640 debug_object_init_on_stack(timer, &timer_debug_descr);
641 do_init_timer(timer, flags, name, key);
643 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
645 void destroy_timer_on_stack(struct timer_list *timer)
647 debug_object_free(timer, &timer_debug_descr);
649 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
652 static inline void debug_timer_init(struct timer_list *timer) { }
653 static inline void debug_timer_activate(struct timer_list *timer) { }
654 static inline void debug_timer_deactivate(struct timer_list *timer) { }
655 static inline void debug_timer_assert_init(struct timer_list *timer) { }
658 static inline void debug_init(struct timer_list *timer)
660 debug_timer_init(timer);
661 trace_timer_init(timer);
665 debug_activate(struct timer_list *timer, unsigned long expires)
667 debug_timer_activate(timer);
668 trace_timer_start(timer, expires, timer->flags);
671 static inline void debug_deactivate(struct timer_list *timer)
673 debug_timer_deactivate(timer);
674 trace_timer_cancel(timer);
677 static inline void debug_assert_init(struct timer_list *timer)
679 debug_timer_assert_init(timer);
682 static void do_init_timer(struct timer_list *timer, unsigned int flags,
683 const char *name, struct lock_class_key *key)
685 timer->entry.pprev = NULL;
686 timer->flags = flags | raw_smp_processor_id();
688 #ifdef CONFIG_TIMER_STATS
689 timer->start_site = NULL;
690 timer->start_pid = -1;
691 memset(timer->start_comm, 0, TASK_COMM_LEN);
693 lockdep_init_map(&timer->lockdep_map, name, key, 0);
697 * init_timer_key - initialize a timer
698 * @timer: the timer to be initialized
699 * @flags: timer flags
700 * @name: name of the timer
701 * @key: lockdep class key of the fake lock used for tracking timer
702 * sync lock dependencies
704 * init_timer_key() must be done to a timer prior to calling *any* of the
705 * other timer functions.
707 void init_timer_key(struct timer_list *timer, unsigned int flags,
708 const char *name, struct lock_class_key *key)
711 do_init_timer(timer, flags, name, key);
713 EXPORT_SYMBOL(init_timer_key);
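/*
 * Illustrative usage sketch (hypothetical driver code): setup_timer() (a
 * <linux/timer.h> wrapper that ends up in init_timer_key()) must run before
 * any other timer operation on the object; only then can it be armed.
 */
static void example_timeout_fn(unsigned long data)
{
	pr_debug("example timer fired, cookie=%lu\n", data);
}

static void example_init_and_arm(struct timer_list *t, unsigned long cookie)
{
	setup_timer(t, example_timeout_fn, cookie);
	mod_timer(t, jiffies + msecs_to_jiffies(100));
}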
715 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
717 struct hlist_node *entry = &timer->entry;
719 debug_deactivate(timer);
724 entry->next = LIST_POISON2;
728 detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
730 detach_timer(timer, true);
731 if (!(timer->flags & TIMER_DEFERRABLE))
732 base->active_timers--;
736 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
739 if (!timer_pending(timer))
742 detach_timer(timer, clear_pending);
743 if (!(timer->flags & TIMER_DEFERRABLE)) {
744 base->active_timers--;
745 if (timer->expires == base->next_timer)
746 base->next_timer = base->timer_jiffies;
748 /* If this was the last timer, advance base->jiffies */
749 if (!--base->all_timers)
750 base->timer_jiffies = jiffies;
755 * We are using hashed locking: holding per_cpu(tvec_bases).lock
756 * means that all timers which are tied to this base via timer->base are
757 * locked, and the base itself is locked too.
759 * So __run_timers/migrate_timers can safely modify all timers which could
760 * be found on ->tvX lists.
762 * When the timer's base is locked and removed from the list, the
763 * TIMER_MIGRATING flag is set, FIXME
765 static struct tvec_base *lock_timer_base(struct timer_list *timer,
766 unsigned long *flags)
767 __acquires(timer->base->lock)
770 u32 tf = timer->flags;
771 struct tvec_base *base;
773 if (!(tf & TIMER_MIGRATING)) {
774 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
775 spin_lock_irqsave(&base->lock, *flags);
776 if (timer->flags == tf)
778 spin_unlock_irqrestore(&base->lock, *flags);
783 #ifdef CONFIG_PREEMPT_RT_FULL
784 static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
785 struct tvec_base *old,
786 struct tvec_base *new)
789 * We cannot do the below because we might be preempted and
790 * then the preempter would see NULL and loop forever.
792 if (spin_trylock(&new->lock)) {
793 WRITE_ONCE(timer->flags,
794 (timer->flags & ~TIMER_BASEMASK) | new->cpu);
795 spin_unlock(&old->lock);
802 static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
803 struct tvec_base *old,
804 struct tvec_base *new)
806 /* See the comment in lock_timer_base() */
807 timer->flags |= TIMER_MIGRATING;
809 spin_unlock(&old->lock);
810 spin_lock(&new->lock);
811 WRITE_ONCE(timer->flags,
812 (timer->flags & ~TIMER_BASEMASK) | new->cpu);
818 __mod_timer(struct timer_list *timer, unsigned long expires,
819 bool pending_only, int pinned)
821 struct tvec_base *base, *new_base;
825 timer_stats_timer_set_start_info(timer);
826 BUG_ON(!timer->function);
828 base = lock_timer_base(timer, &flags);
830 ret = detach_if_pending(timer, base, false);
831 if (!ret && pending_only)
834 debug_activate(timer, expires);
836 new_base = get_target_base(base, pinned);
838 if (base != new_base) {
840 * We are trying to schedule the timer on the local CPU.
841 * However we can't change timer's base while it is running,
842 * otherwise del_timer_sync() can't detect that the timer's
843 * handler has not yet finished. This also guarantees that
844 * the timer is serialized wrt itself.
846 if (likely(base->running_timer != timer))
847 base = switch_timer_base(timer, base, new_base);
850 timer->expires = expires;
851 internal_add_timer(base, timer);
854 spin_unlock_irqrestore(&base->lock, flags);
860 * mod_timer_pending - modify a pending timer's timeout
861 * @timer: the pending timer to be modified
862 * @expires: new timeout in jiffies
864 * mod_timer_pending() is the same for pending timers as mod_timer(),
865 * but will not re-activate and modify already deleted timers.
867 * It is useful for unserialized use of timers.
869 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
871 return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
873 EXPORT_SYMBOL(mod_timer_pending);
876 * Decide where to put the timer while taking the slack into account
879 * 1) calculate the maximum (absolute) time
880 * 2) calculate the highest bit where the expires and new max are different
881 * 3) use this bit to make a mask
882 * 4) use the bitmask to round down the maximum time, so that all last
886 unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
888 unsigned long expires_limit, mask;
891 if (timer->slack >= 0) {
892 expires_limit = expires + timer->slack;
894 long delta = expires - jiffies;
899 expires_limit = expires + delta / 256;
901 mask = expires ^ expires_limit;
907 mask = (1UL << bit) - 1;
909 expires_limit = expires_limit & ~(mask);
911 return expires_limit;
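/*
 * Worked example (illustrative numbers, not from the source): with
 * expires = 0x1432 and timer->slack = 100 jiffies, expires_limit starts as
 * 0x1496.  expires ^ expires_limit = 0xa4, whose highest set bit is bit 7,
 * so mask = 0x7f and the returned value is 0x1496 & ~0x7f = 0x1480 - an
 * expiry with the low bits cleared that still lies inside
 * [expires, expires + slack], so timers with similar deadlines collapse
 * onto the same wheel slot.
 */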
915 * mod_timer - modify a timer's timeout
916 * @timer: the timer to be modified
917 * @expires: new timeout in jiffies
919 * mod_timer() is a more efficient way to update the expire field of an
920 * active timer (if the timer is inactive it will be activated)
922 * mod_timer(timer, expires) is equivalent to:
924 * del_timer(timer); timer->expires = expires; add_timer(timer);
926 * Note that if there are multiple unserialized concurrent users of the
927 * same timer, then mod_timer() is the only safe way to modify the timeout,
928 * since add_timer() cannot modify an already running timer.
930 * The function returns whether it has modified a pending timer or not.
931 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
932 * active timer returns 1.)
934 int mod_timer(struct timer_list *timer, unsigned long expires)
936 expires = apply_slack(timer, expires);
939 * This is a common optimization triggered by the
940 * networking code - if the timer is re-modified
941 * to be the same thing then just return:
943 if (timer_pending(timer) && timer->expires == expires)
946 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
948 EXPORT_SYMBOL(mod_timer);
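/*
 * Illustrative usage sketch (hypothetical callback): mod_timer() may be
 * called from the handler itself, which is the usual way to build a periodic
 * timer on top of the one-shot wheel.
 */
static void example_periodic_fn(unsigned long data)
{
	struct timer_list *t = (struct timer_list *)data;	/* cookie set up by the caller */

	/* ... do the periodic work ... */

	mod_timer(t, jiffies + HZ);	/* run again in about one second */
}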
951 * mod_timer_pinned - modify a timer's timeout
952 * @timer: the timer to be modified
953 * @expires: new timeout in jiffies
955 * mod_timer_pinned() is a way to update the expire field of an
956 * active timer (if the timer is inactive it will be activated)
957 * and to ensure that the timer is scheduled on the current CPU.
959 * Note that this does not prevent the timer from being migrated
960 * when the current CPU goes offline. If this is a problem for
961 * you, use CPU-hotplug notifiers to handle it correctly, for
962 * example, cancelling the timer when the corresponding CPU goes
965 * mod_timer_pinned(timer, expires) is equivalent to:
967 * del_timer(timer); timer->expires = expires; add_timer(timer);
969 int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
971 if (timer->expires == expires && timer_pending(timer))
974 return __mod_timer(timer, expires, false, TIMER_PINNED);
976 EXPORT_SYMBOL(mod_timer_pinned);
979 * add_timer - start a timer
980 * @timer: the timer to be added
982 * The kernel will do a ->function(->data) callback from the
983 * timer interrupt at the ->expires point in the future. The
984 * current time is 'jiffies'.
986 * The timer's ->expires, ->function (and if the handler uses it, ->data)
987 * fields must be set prior to calling this function.
989 * Timers with an ->expires field in the past will be executed in the next
992 void add_timer(struct timer_list *timer)
994 BUG_ON(timer_pending(timer));
995 mod_timer(timer, timer->expires);
997 EXPORT_SYMBOL(add_timer);
1000 * add_timer_on - start a timer on a particular CPU
1001 * @timer: the timer to be added
1002 * @cpu: the CPU to start it on
1004 * This is not very scalable on SMP. Double adds are not possible.
1006 void add_timer_on(struct timer_list *timer, int cpu)
1008 struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
1009 struct tvec_base *base;
1010 unsigned long flags;
1012 timer_stats_timer_set_start_info(timer);
1013 BUG_ON(timer_pending(timer) || !timer->function);
1016 * If @timer was on a different CPU, it should be migrated with the
1017 * old base locked to prevent other operations proceeding with the
1018 * wrong base locked. See lock_timer_base().
1020 base = lock_timer_base(timer, &flags);
1021 if (base != new_base) {
1022 timer->flags |= TIMER_MIGRATING;
1024 spin_unlock(&base->lock);
1026 spin_lock(&base->lock);
1027 WRITE_ONCE(timer->flags,
1028 (timer->flags & ~TIMER_BASEMASK) | cpu);
1031 debug_activate(timer, timer->expires);
1032 internal_add_timer(base, timer);
1033 spin_unlock_irqrestore(&base->lock, flags);
1035 EXPORT_SYMBOL_GPL(add_timer_on);
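/*
 * Illustrative usage sketch (hypothetical caller): add_timer_on() queues the
 * timer on the chosen CPU's wheel, e.g. for per-CPU watchdog-style work.
 * The timer must not already be pending.
 */
static void example_arm_on_cpu(struct timer_list *t, int cpu)
{
	t->expires = jiffies + 10 * HZ;
	add_timer_on(t, cpu);
}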
1037 #ifdef CONFIG_PREEMPT_RT_FULL
1039 * Wait for a running timer
1041 static void wait_for_running_timer(struct timer_list *timer)
1043 struct tvec_base *base;
1044 u32 tf = timer->flags;
1046 if (tf & TIMER_MIGRATING)
1049 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
1050 wait_event(base->wait_for_running_timer,
1051 base->running_timer != timer);
1054 # define wakeup_timer_waiters(b) wake_up_all(&(b)->wait_for_running_timer)
1056 static inline void wait_for_running_timer(struct timer_list *timer)
1061 # define wakeup_timer_waiters(b) do { } while (0)
1065 * del_timer - deactivate a timer.
1066 * @timer: the timer to be deactivated
1068 * del_timer() deactivates a timer - this works on both active and inactive
1071 * The function returns whether it has deactivated a pending timer or not.
1072 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
1073 * active timer returns 1.)
1075 int del_timer(struct timer_list *timer)
1077 struct tvec_base *base;
1078 unsigned long flags;
1081 debug_assert_init(timer);
1083 timer_stats_timer_clear_start_info(timer);
1084 if (timer_pending(timer)) {
1085 base = lock_timer_base(timer, &flags);
1086 ret = detach_if_pending(timer, base, true);
1087 spin_unlock_irqrestore(&base->lock, flags);
1092 EXPORT_SYMBOL(del_timer);
1095 * try_to_del_timer_sync - Try to deactivate a timer
1096 * @timer: the timer to deactivate
1098 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1099 * exit the timer is not queued and the handler is not running on any CPU.
1101 int try_to_del_timer_sync(struct timer_list *timer)
1103 struct tvec_base *base;
1104 unsigned long flags;
1107 debug_assert_init(timer);
1109 base = lock_timer_base(timer, &flags);
1111 if (base->running_timer != timer) {
1112 timer_stats_timer_clear_start_info(timer);
1113 ret = detach_if_pending(timer, base, true);
1115 spin_unlock_irqrestore(&base->lock, flags);
1119 EXPORT_SYMBOL(try_to_del_timer_sync);
1121 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
1123 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1124 * @timer: the timer to be deactivated
1126 * This function only differs from del_timer() on SMP: besides deactivating
1127 * the timer it also makes sure the handler has finished executing on other
1130 * Synchronization rules: Callers must prevent restarting of the timer,
1131 * otherwise this function is meaningless. It must not be called from
1132 * interrupt contexts unless the timer is an irqsafe one. The caller must
1133 * not hold locks which would prevent completion of the timer's
1134 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1135 * timer is not queued and the handler is not running on any CPU.
1137 * Note: For !irqsafe timers, you must not hold locks that are held in
1138 * interrupt context while calling this function. Even if the lock has
1139 * nothing to do with the timer in question. Here's why:
1145 * base->running_timer = mytimer;
1146 * spin_lock_irq(somelock);
1148 * spin_lock(somelock);
1149 * del_timer_sync(mytimer);
1150 * while (base->running_timer == mytimer);
1152 * Now del_timer_sync() will never return and never release somelock.
1153 * The interrupt on the other CPU is waiting to grab somelock but
1154 * it has interrupted the softirq that CPU0 is waiting to finish.
1156 * The function returns whether it has deactivated a pending timer or not.
1158 int del_timer_sync(struct timer_list *timer)
1160 #ifdef CONFIG_LOCKDEP
1161 unsigned long flags;
1164 * If lockdep gives a backtrace here, please reference
1165 * the synchronization rules above.
1167 local_irq_save(flags);
1168 lock_map_acquire(&timer->lockdep_map);
1169 lock_map_release(&timer->lockdep_map);
1170 local_irq_restore(flags);
1173 * don't use it in hardirq context, because it
1174 * could lead to deadlock.
1176 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1178 int ret = try_to_del_timer_sync(timer);
1181 wait_for_running_timer(timer);
1184 EXPORT_SYMBOL(del_timer_sync);
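/*
 * Illustrative teardown sketch (hypothetical device structure): the usual
 * shutdown sequence cancels the timer with del_timer_sync() *before* freeing
 * the object it points into, and without holding any lock that the timer
 * handler itself takes (see the deadlock example above).
 */
struct example_dev {
	struct timer_list timer;
	/* ... */
};

static void example_dev_destroy(struct example_dev *dev)
{
	del_timer_sync(&dev->timer);	/* handler can no longer be running */
	kfree(dev);
}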
1187 static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1189 /* cascade all the timers from tv up one level */
1190 struct timer_list *timer;
1191 struct hlist_node *tmp;
1192 struct hlist_head tv_list;
1194 hlist_move_list(tv->vec + index, &tv_list);
1197 * We are removing _all_ timers from the list, so we
1198 * don't have to detach them individually.
1200 hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1201 /* No accounting, while moving them */
1202 __internal_add_timer(base, timer);
1208 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1211 int count = preempt_count();
1213 #ifdef CONFIG_LOCKDEP
1215 * It is permissible to free the timer from inside the
1216 * function that is called from it, this we need to take into
1217 * account for lockdep too. To avoid bogus "held lock freed"
1218 * warnings as well as problems when looking into
1219 * timer->lockdep_map, make a copy and use that here.
1221 struct lockdep_map lockdep_map;
1223 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1226 * Couple the lock chain with the lock chain at
1227 * del_timer_sync() by acquiring the lock_map around the fn()
1228 * call here and in del_timer_sync().
1230 lock_map_acquire(&lockdep_map);
1232 trace_timer_expire_entry(timer);
1234 trace_timer_expire_exit(timer);
1236 lock_map_release(&lockdep_map);
1238 if (count != preempt_count()) {
1239 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1240 fn, count, preempt_count());
1242 * Restore the preempt count. That gives us a decent
1243 * chance to survive and extract information. If the
1244 * callback kept a lock held, bad luck, but not worse
1245 * than the BUG() we had.
1247 preempt_count_set(count);
1251 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1254 * __run_timers - run all expired timers (if any) on this CPU.
1255 * @base: the timer vector to be processed.
1257 * This function cascades all vectors and executes all expired timer
1260 static inline void __run_timers(struct tvec_base *base)
1262 struct timer_list *timer;
1264 spin_lock_irq(&base->lock);
1266 while (time_after_eq(jiffies, base->timer_jiffies)) {
1267 struct hlist_head work_list;
1268 struct hlist_head *head = &work_list;
1271 if (!base->all_timers) {
1272 base->timer_jiffies = jiffies;
1276 index = base->timer_jiffies & TVR_MASK;
1282 (!cascade(base, &base->tv2, INDEX(0))) &&
1283 (!cascade(base, &base->tv3, INDEX(1))) &&
1284 !cascade(base, &base->tv4, INDEX(2)))
1285 cascade(base, &base->tv5, INDEX(3));
1286 ++base->timer_jiffies;
1287 hlist_move_list(base->tv1.vec + index, head);
1288 while (!hlist_empty(head)) {
1289 void (*fn)(unsigned long);
1293 timer = hlist_entry(head->first, struct timer_list, entry);
1294 fn = timer->function;
1296 irqsafe = timer->flags & TIMER_IRQSAFE;
1298 timer_stats_account_timer(timer);
1300 base->running_timer = timer;
1301 detach_expired_timer(timer, base);
1304 spin_unlock(&base->lock);
1305 call_timer_fn(timer, fn, data);
1306 base->running_timer = NULL;
1307 spin_lock(&base->lock);
1309 spin_unlock_irq(&base->lock);
1310 call_timer_fn(timer, fn, data);
1311 base->running_timer = NULL;
1312 spin_lock_irq(&base->lock);
1316 spin_unlock_irq(&base->lock);
1317 wakeup_timer_waiters(base);
1320 #ifdef CONFIG_NO_HZ_COMMON
1322 * Find out when the next timer event is due to happen. This
1323 * is used on S/390 to stop all activity when a CPU is idle.
1324 * This function needs to be called with interrupts disabled.
1326 static unsigned long __next_timer_interrupt(struct tvec_base *base)
1328 unsigned long timer_jiffies = base->timer_jiffies;
1329 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1330 int index, slot, array, found = 0;
1331 struct timer_list *nte;
1332 struct tvec *varray[4];
1334 /* Look for timer events in tv1. */
1335 index = slot = timer_jiffies & TVR_MASK;
1337 hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
1338 if (nte->flags & TIMER_DEFERRABLE)
1342 expires = nte->expires;
1343 /* Look at the cascade bucket(s)? */
1344 if (!index || slot < index)
1348 slot = (slot + 1) & TVR_MASK;
1349 } while (slot != index);
1352 /* Calculate the next cascade event */
1354 timer_jiffies += TVR_SIZE - index;
1355 timer_jiffies >>= TVR_BITS;
1357 /* Check tv2-tv5. */
1358 varray[0] = &base->tv2;
1359 varray[1] = &base->tv3;
1360 varray[2] = &base->tv4;
1361 varray[3] = &base->tv5;
1363 for (array = 0; array < 4; array++) {
1364 struct tvec *varp = varray[array];
1366 index = slot = timer_jiffies & TVN_MASK;
1368 hlist_for_each_entry(nte, varp->vec + slot, entry) {
1369 if (nte->flags & TIMER_DEFERRABLE)
1373 if (time_before(nte->expires, expires))
1374 expires = nte->expires;
1377 * Do we still search for the first timer or are
1378 * we looking up the cascade buckets?
1381 /* Look at the cascade bucket(s)? */
1382 if (!index || slot < index)
1386 slot = (slot + 1) & TVN_MASK;
1387 } while (slot != index);
1390 timer_jiffies += TVN_SIZE - index;
1391 timer_jiffies >>= TVN_BITS;
1397 * Check, if the next hrtimer event is before the next timer wheel
1400 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1402 u64 nextevt = hrtimer_get_next_event();
1405 * If high resolution timers are enabled
1406 * hrtimer_get_next_event() returns KTIME_MAX.
1408 if (expires <= nextevt)
1412 * If the next timer is already expired, return the tick base
1413 * time so the tick is fired immediately.
1415 if (nextevt <= basem)
1419 * Round up to the next jiffie. High resolution timers are
1420 * off, so the hrtimers are expired in the tick and we need to
1421 * make sure that this tick really expires the timer to avoid
1422 * a ping pong of the nohz stop code.
1424 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1426 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1430 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1431 * @basej: base time jiffies
1432 * @basem: base time clock monotonic
1434 * Returns the tick aligned clock monotonic time of the next pending
1435 * timer or KTIME_MAX if no timer is pending.
1437 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1439 struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1440 u64 expires = KTIME_MAX;
1441 unsigned long nextevt;
1444 * Pretend that there is no timer pending if the cpu is offline.
1445 * Possible pending timers will be migrated later to an active cpu.
1447 if (cpu_is_offline(smp_processor_id()))
1450 #ifdef CONFIG_PREEMPT_RT_FULL
1452 * On PREEMPT_RT we cannot sleep here. As a result we can't take
1453 * the base lock to check when the next timer is pending and so
1454 * we assume the next jiffy.
1456 if (!spin_do_trylock(&base->lock))
1457 return basem + TICK_NSEC;
1459 spin_lock(&base->lock);
1461 if (base->active_timers) {
1462 if (time_before_eq(base->next_timer, base->timer_jiffies))
1463 base->next_timer = __next_timer_interrupt(base);
1464 nextevt = base->next_timer;
1465 if (time_before_eq(nextevt, basej))
1468 expires = basem + (nextevt - basej) * TICK_NSEC;
1470 #ifdef CONFIG_PREEMPT_RT_FULL
1471 rt_spin_unlock(&base->lock);
1473 spin_unlock(&base->lock);
1476 return cmp_next_hrtimer_event(basem, expires);
1481 * Called from the timer interrupt handler to charge one tick to the current
1482 * process. user_tick is 1 if the tick is user time, 0 for system.
1484 void update_process_times(int user_tick)
1486 struct task_struct *p = current;
1488 /* Note: this timer irq context must be accounted for as well. */
1489 account_process_tick(p, user_tick);
1492 rcu_check_callbacks(user_tick);
1493 #if defined(CONFIG_IRQ_WORK)
1497 run_posix_cpu_timers(p);
1501 * This function runs timers and the timer-tq in bottom half context.
1503 static void run_timer_softirq(struct softirq_action *h)
1505 struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1507 irq_work_tick_soft();
1509 if (time_after_eq(jiffies, base->timer_jiffies))
1514 * Called by the local, per-CPU timer interrupt on SMP.
1516 void run_local_timers(void)
1518 struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1520 hrtimer_run_queues();
1522 * We can access this lockless as we are in the timer
1523 * interrupt. If there are no timers queued, nothing to do in
1524 * the timer softirq.
1526 #ifdef CONFIG_PREEMPT_RT_FULL
1527 if (irq_work_needs_cpu()) {
1528 raise_softirq(TIMER_SOFTIRQ);
1531 if (!spin_do_trylock(&base->lock)) {
1532 raise_softirq(TIMER_SOFTIRQ);
1536 if (!base->active_timers)
1539 /* Check whether the next pending timer has expired */
1540 if (time_before_eq(base->next_timer, jiffies))
1541 raise_softirq(TIMER_SOFTIRQ);
1543 #ifdef CONFIG_PREEMPT_RT_FULL
1544 rt_spin_unlock(&base->lock);
1546 /* The ; ensures that gcc won't complain in the !RT case */
1550 #ifdef __ARCH_WANT_SYS_ALARM
1553 * For backwards compatibility? This can be done in libc so Alpha
1554 * and all newer ports shouldn't need it.
1556 SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1558 return alarm_setitimer(seconds);
1563 static void process_timeout(unsigned long __data)
1565 wake_up_process((struct task_struct *)__data);
1569 * schedule_timeout - sleep until timeout
1570 * @timeout: timeout value in jiffies
1572 * Make the current task sleep until @timeout jiffies have
1573 * elapsed. The routine will return immediately unless
1574 * the current task state has been set (see set_current_state()).
1576 * You can set the task state as follows -
1578 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1579 * pass before the routine returns. The routine will return 0
1581 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1582 * delivered to the current task. In this case the remaining time
1583 * in jiffies will be returned, or 0 if the timer expired in time
1585 * The current task state is guaranteed to be TASK_RUNNING when this
1588 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1589 * the CPU away without a bound on the timeout. In this case the return
1590 * value will be %MAX_SCHEDULE_TIMEOUT.
1592 * In all cases the return value is guaranteed to be non-negative.
1594 signed long __sched schedule_timeout(signed long timeout)
1596 struct timer_list timer;
1597 unsigned long expire;
1601 case MAX_SCHEDULE_TIMEOUT:
1603 * These two special cases are useful to be comfortable
1604 * in the caller. Nothing more. We could take
1605 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1606 * but I'd like to return a valid offset (>=0) to allow
1607 * the caller to do everything it wants with the retval.
1613 * Another bit of PARANOID. Note that the retval will be
1614 * 0 since no piece of kernel is supposed to do a check
1615 * for a negative retval of schedule_timeout() (since it
1616 * should never happen anyway). You just have the printk()
1617 * that will tell you if something has gone wrong and where.
1620 printk(KERN_ERR "schedule_timeout: wrong timeout "
1621 "value %lx\n", timeout);
1623 current->state = TASK_RUNNING;
1628 expire = timeout + jiffies;
1630 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1631 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1633 del_singleshot_timer_sync(&timer);
1635 /* Remove the timer from the object tracker */
1636 destroy_timer_on_stack(&timer);
1638 timeout = expire - jiffies;
1641 return timeout < 0 ? 0 : timeout;
1643 EXPORT_SYMBOL(schedule_timeout);
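/*
 * Illustrative usage sketch (hypothetical wait loop): the task state must be
 * set *before* calling schedule_timeout(), otherwise it returns immediately.
 * On return the state is TASK_RUNNING again, as documented above.
 */
static int example_wait_for_flag(int *flag)
{
	while (!READ_ONCE(*flag)) {
		if (signal_pending(current))
			return -ERESTARTSYS;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);	/* re-check roughly every 100ms */
	}
	return 0;
}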
1646 * We can use __set_current_state() here because schedule_timeout() calls
1647 * schedule() unconditionally.
1649 signed long __sched schedule_timeout_interruptible(signed long timeout)
1651 __set_current_state(TASK_INTERRUPTIBLE);
1652 return schedule_timeout(timeout);
1654 EXPORT_SYMBOL(schedule_timeout_interruptible);
1656 signed long __sched schedule_timeout_killable(signed long timeout)
1658 __set_current_state(TASK_KILLABLE);
1659 return schedule_timeout(timeout);
1661 EXPORT_SYMBOL(schedule_timeout_killable);
1663 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1665 __set_current_state(TASK_UNINTERRUPTIBLE);
1666 return schedule_timeout(timeout);
1668 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1670 #ifdef CONFIG_HOTPLUG_CPU
1671 static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
1673 struct timer_list *timer;
1674 int cpu = new_base->cpu;
1676 while (!hlist_empty(head)) {
1677 timer = hlist_entry(head->first, struct timer_list, entry);
1678 /* We ignore the accounting on the dying cpu */
1679 detach_timer(timer, false);
1680 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1681 internal_add_timer(new_base, timer);
1685 static void migrate_timers(int cpu)
1687 struct tvec_base *old_base;
1688 struct tvec_base *new_base;
1691 BUG_ON(cpu_online(cpu));
1692 old_base = per_cpu_ptr(&tvec_bases, cpu);
1693 new_base = get_local_ptr(&tvec_bases);
1695 * The caller is globally serialized and nobody else
1696 * takes two locks at once, so deadlock is not possible.
1698 spin_lock_irq(&new_base->lock);
1699 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1701 BUG_ON(old_base->running_timer);
1703 for (i = 0; i < TVR_SIZE; i++)
1704 migrate_timer_list(new_base, old_base->tv1.vec + i);
1705 for (i = 0; i < TVN_SIZE; i++) {
1706 migrate_timer_list(new_base, old_base->tv2.vec + i);
1707 migrate_timer_list(new_base, old_base->tv3.vec + i);
1708 migrate_timer_list(new_base, old_base->tv4.vec + i);
1709 migrate_timer_list(new_base, old_base->tv5.vec + i);
1712 old_base->active_timers = 0;
1713 old_base->all_timers = 0;
1715 spin_unlock(&old_base->lock);
1716 spin_unlock_irq(&new_base->lock);
1717 put_local_ptr(&tvec_bases);
1720 static int timer_cpu_notify(struct notifier_block *self,
1721 unsigned long action, void *hcpu)
1725 case CPU_DEAD_FROZEN:
1726 migrate_timers((long)hcpu);
1735 static inline void timer_register_cpu_notifier(void)
1737 cpu_notifier(timer_cpu_notify, 0);
1740 static inline void timer_register_cpu_notifier(void) { }
1741 #endif /* CONFIG_HOTPLUG_CPU */
1743 static void __init init_timer_cpu(int cpu)
1745 struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
1748 spin_lock_init(&base->lock);
1749 #ifdef CONFIG_PREEMPT_RT_FULL
1750 init_waitqueue_head(&base->wait_for_running_timer);
1753 base->timer_jiffies = jiffies;
1754 base->next_timer = base->timer_jiffies;
1757 static void __init init_timer_cpus(void)
1761 for_each_possible_cpu(cpu)
1762 init_timer_cpu(cpu);
1765 void __init init_timers(void)
1769 timer_register_cpu_notifier();
1770 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1774 * msleep - sleep safely even with waitqueue interruptions
1775 * @msecs: Time in milliseconds to sleep for
1777 void msleep(unsigned int msecs)
1779 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1782 timeout = schedule_timeout_uninterruptible(timeout);
1785 EXPORT_SYMBOL(msleep);
1788 * msleep_interruptible - sleep waiting for signals
1789 * @msecs: Time in milliseconds to sleep for
1791 unsigned long msleep_interruptible(unsigned int msecs)
1793 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1795 while (timeout && !signal_pending(current))
1796 timeout = schedule_timeout_interruptible(timeout);
1797 return jiffies_to_msecs(timeout);
1800 EXPORT_SYMBOL(msleep_interruptible);
1802 static void __sched do_usleep_range(unsigned long min, unsigned long max)
1805 unsigned long delta;
1807 kmin = ktime_set(0, min * NSEC_PER_USEC);
1808 delta = (max - min) * NSEC_PER_USEC;
1809 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1813 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1814 * @min: Minimum time in usecs to sleep
1815 * @max: Maximum time in usecs to sleep
1817 void __sched usleep_range(unsigned long min, unsigned long max)
1819 __set_current_state(TASK_UNINTERRUPTIBLE);
1820 do_usleep_range(min, max);
1822 EXPORT_SYMBOL(usleep_range);
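/*
 * Illustrative usage sketch (hypothetical delays): for short sleeps in
 * process context usleep_range() gives the scheduler a window to coalesce
 * wakeups, while msleep() is the simpler choice for longer, less precise
 * delays (see Documentation/timers/timers-howto.txt).
 */
static void example_delays(void)
{
	usleep_range(100, 200);		/* ~100-200us, hrtimer based */
	msleep(50);			/* ~50ms, timer-wheel based, may oversleep */
}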