4 * Kernel internal timers
6 * Copyright (C) 1991, 1992 Linus Torvalds
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
22 #include <linux/kernel_stat.h>
23 #include <linux/export.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
28 #include <linux/swap.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/notifier.h>
31 #include <linux/thread_info.h>
32 #include <linux/time.h>
33 #include <linux/jiffies.h>
34 #include <linux/posix-timers.h>
35 #include <linux/cpu.h>
36 #include <linux/syscalls.h>
37 #include <linux/delay.h>
38 #include <linux/tick.h>
39 #include <linux/kallsyms.h>
40 #include <linux/irq_work.h>
41 #include <linux/sched.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/slab.h>
44 #include <linux/compat.h>
46 #include <asm/uaccess.h>
47 #include <asm/unistd.h>
48 #include <asm/div64.h>
49 #include <asm/timex.h>
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/timer.h>
55 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
57 EXPORT_SYMBOL(jiffies_64);
60 * per-CPU timer vector definitions:
62 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
63 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
64 #define TVN_SIZE (1 << TVN_BITS)
65 #define TVR_SIZE (1 << TVR_BITS)
66 #define TVN_MASK (TVN_SIZE - 1)
67 #define TVR_MASK (TVR_SIZE - 1)
68 #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
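/*
 * For illustration (worked example derived from the defines above, assuming
 * CONFIG_BASE_SMALL=0 so TVR_BITS=8 and TVN_BITS=6): tv1 covers the next
 * 2^8 = 256 jiffies at single-jiffy resolution, tv2 reaches out to 2^14
 * jiffies, tv3 to 2^20, tv4 to 2^26, and tv5 to MAX_TVAL = 2^32 - 1.
 * A timer whose expiry lies 1000 jiffies beyond base->timer_jiffies has
 * idx = 1000, which is >= 256 and < 2^14, so __internal_add_timer() below
 * queues it on tv2 in slot (expires >> TVR_BITS) & TVN_MASK.
 */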
71 struct list_head vec[TVN_SIZE];
75 struct list_head vec[TVR_SIZE];
80 struct timer_list *running_timer;
81 #ifdef CONFIG_PREEMPT_RT_FULL
82 wait_queue_head_t wait_for_running_timer;
84 unsigned long timer_jiffies;
85 unsigned long next_timer;
86 unsigned long active_timers;
87 unsigned long all_timers;
94 } ____cacheline_aligned;
97 * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
98 * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
99 * pointer to per-cpu entries because we don't know where we'll map the section,
100 * even for the boot cpu.
102 * And so we use boot_tvec_bases for boot CPU and per-cpu __tvec_bases for the rest of them.
105 struct tvec_base boot_tvec_bases;
106 EXPORT_SYMBOL(boot_tvec_bases);
108 static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
110 /* Functions below help us manage the 'deferrable' flag */
111 static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
113 return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
116 static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
118 return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
121 static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
123 return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
127 timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
129 unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
131 timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
134 static unsigned long round_jiffies_common(unsigned long j, int cpu,
138 unsigned long original = j;
141 * We don't want all cpus firing their timers at once hitting the
142 * same lock or cachelines, so we skew each extra cpu with an extra
143 * 3 jiffies. This 3 jiffies came originally from the mm/ code which already did this.
145 * The skew is done by adding 3*cpunr, then rounding, then subtracting this
146 * extra offset again.
153 * If the target jiffy is just after a whole second (which can happen
154 * due to delays of the timer irq, long irq off times etc etc) then
155 * we should round down to the whole second, not up. Use 1/4th second
156 * as the cutoff for this rounding, as an extreme upper bound for such delays.
157 * But never round down if @force_up is set.
159 if (rem < HZ/4 && !force_up) /* round down */
164 /* now that we have rounded, subtract the extra skew again */
168 * Make sure j is still in the future. Otherwise return the original value.
171 return time_is_after_jiffies(j) ? j : original;
175 * __round_jiffies - function to round jiffies to a full second
176 * @j: the time in (absolute) jiffies that should be rounded
177 * @cpu: the processor number on which the timeout will happen
179 * __round_jiffies() rounds an absolute time in the future (in jiffies)
180 * up or down to (approximately) full seconds. This is useful for timers
181 * for which the exact time they fire does not matter too much, as long as
182 * they fire approximately every X seconds.
184 * By rounding these timers to whole seconds, all such timers will fire
185 * at the same time, rather than at various times spread out. The goal
186 * of this is to have the CPU wake up less, which saves power.
188 * The exact rounding is skewed for each processor to avoid all
189 * processors firing at the exact same time, which could lead
190 * to lock contention or spurious cache line bouncing.
192 * The return value is the rounded version of the @j parameter.
194 unsigned long __round_jiffies(unsigned long j, int cpu)
196 return round_jiffies_common(j, cpu, false);
198 EXPORT_SYMBOL_GPL(__round_jiffies);
201 * __round_jiffies_relative - function to round jiffies to a full second
202 * @j: the time in (relative) jiffies that should be rounded
203 * @cpu: the processor number on which the timeout will happen
205 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
206 * up or down to (approximately) full seconds. This is useful for timers
207 * for which the exact time they fire does not matter too much, as long as
208 * they fire approximately every X seconds.
210 * By rounding these timers to whole seconds, all such timers will fire
211 * at the same time, rather than at various times spread out. The goal
212 * of this is to have the CPU wake up less, which saves power.
214 * The exact rounding is skewed for each processor to avoid all
215 * processors firing at the exact same time, which could lead
216 * to lock contention or spurious cache line bouncing.
218 * The return value is the rounded version of the @j parameter.
220 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
222 unsigned long j0 = jiffies;
224 /* Use j0 because jiffies might change while we run */
225 return round_jiffies_common(j + j0, cpu, false) - j0;
227 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
230 * round_jiffies - function to round jiffies to a full second
231 * @j: the time in (absolute) jiffies that should be rounded
233 * round_jiffies() rounds an absolute time in the future (in jiffies)
234 * up or down to (approximately) full seconds. This is useful for timers
235 * for which the exact time they fire does not matter too much, as long as
236 * they fire approximately every X seconds.
238 * By rounding these timers to whole seconds, all such timers will fire
239 * at the same time, rather than at various times spread out. The goal
240 * of this is to have the CPU wake up less, which saves power.
242 * The return value is the rounded version of the @j parameter.
244 unsigned long round_jiffies(unsigned long j)
246 return round_jiffies_common(j, raw_smp_processor_id(), false);
248 EXPORT_SYMBOL_GPL(round_jiffies);
251 * round_jiffies_relative - function to round jiffies to a full second
252 * @j: the time in (relative) jiffies that should be rounded
254 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
255 * up or down to (approximately) full seconds. This is useful for timers
256 * for which the exact time they fire does not matter too much, as long as
257 * they fire approximately every X seconds.
259 * By rounding these timers to whole seconds, all such timers will fire
260 * at the same time, rather than at various times spread out. The goal
261 * of this is to have the CPU wake up less, which saves power.
263 * The return value is the rounded version of the @j parameter.
265 unsigned long round_jiffies_relative(unsigned long j)
267 return __round_jiffies_relative(j, raw_smp_processor_id());
269 EXPORT_SYMBOL_GPL(round_jiffies_relative);
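/*
 * Example (illustrative sketch; the timer name and the one-second period
 * are made up, not something defined in this file): a caller re-arming a
 * coarse periodic timer would typically round the delta so that many such
 * timers expire in the same tick:
 *
 *	mod_timer(&my_housekeeping_timer,
 *		  jiffies + round_jiffies_relative(HZ));
 */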
272 * __round_jiffies_up - function to round jiffies up to a full second
273 * @j: the time in (absolute) jiffies that should be rounded
274 * @cpu: the processor number on which the timeout will happen
276 * This is the same as __round_jiffies() except that it will never
277 * round down. This is useful for timeouts for which the exact time
278 * of firing does not matter too much, as long as they don't fire too late.
281 unsigned long __round_jiffies_up(unsigned long j, int cpu)
283 return round_jiffies_common(j, cpu, true);
285 EXPORT_SYMBOL_GPL(__round_jiffies_up);
288 * __round_jiffies_up_relative - function to round jiffies up to a full second
289 * @j: the time in (relative) jiffies that should be rounded
290 * @cpu: the processor number on which the timeout will happen
292 * This is the same as __round_jiffies_relative() except that it will never
293 * round down. This is useful for timeouts for which the exact time
294 * of firing does not matter too much, as long as they don't fire too late.
297 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
299 unsigned long j0 = jiffies;
301 /* Use j0 because jiffies might change while we run */
302 return round_jiffies_common(j + j0, cpu, true) - j0;
304 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
307 * round_jiffies_up - function to round jiffies up to a full second
308 * @j: the time in (absolute) jiffies that should be rounded
310 * This is the same as round_jiffies() except that it will never
311 * round down. This is useful for timeouts for which the exact time
312 * of firing does not matter too much, as long as they don't fire too late.
315 unsigned long round_jiffies_up(unsigned long j)
317 return round_jiffies_common(j, raw_smp_processor_id(), true);
319 EXPORT_SYMBOL_GPL(round_jiffies_up);
322 * round_jiffies_up_relative - function to round jiffies up to a full second
323 * @j: the time in (relative) jiffies that should be rounded
325 * This is the same as round_jiffies_relative() except that it will never
326 * round down. This is useful for timeouts for which the exact time
327 * of firing does not matter too much, as long as they don't fire too late.
330 unsigned long round_jiffies_up_relative(unsigned long j)
332 return __round_jiffies_up_relative(j, raw_smp_processor_id());
334 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
337 * set_timer_slack - set the allowed slack for a timer
338 * @timer: the timer to be modified
339 * @slack_hz: the amount of time (in jiffies) allowed for rounding
341 * Set the amount of slack, in jiffies, that a certain timer is allowed.
342 * By setting this value, the timer subsystem
343 * will schedule the actual timer somewhere between
344 * the time mod_timer() asks for, and that time plus the slack.
346 * By setting the slack to -1, a percentage of the delay is used instead.
349 void set_timer_slack(struct timer_list *timer, int slack_hz)
351 timer->slack = slack_hz;
353 EXPORT_SYMBOL_GPL(set_timer_slack);
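/*
 * Example (illustrative sketch; the timer name is made up): a caller whose
 * work is not time-critical can grant the wheel a full second of rounding
 * room, letting its expiry be coalesced with other timers:
 *
 *	set_timer_slack(&my_cache_shrink_timer, HZ);
 */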
356 * If the list is empty, catch up ->timer_jiffies to the current time.
357 * The caller must hold the tvec_base lock. Returns true if the list
358 * was empty and therefore ->timer_jiffies was updated.
360 static bool catchup_timer_jiffies(struct tvec_base *base)
362 if (!base->all_timers) {
363 base->timer_jiffies = jiffies;
370 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
372 unsigned long expires = timer->expires;
373 unsigned long idx = expires - base->timer_jiffies;
374 struct list_head *vec;
376 if (idx < TVR_SIZE) {
377 int i = expires & TVR_MASK;
378 vec = base->tv1.vec + i;
379 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
380 int i = (expires >> TVR_BITS) & TVN_MASK;
381 vec = base->tv2.vec + i;
382 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
383 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
384 vec = base->tv3.vec + i;
385 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
386 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
387 vec = base->tv4.vec + i;
388 } else if ((signed long) idx < 0) {
390 * Can happen if you add a timer with expires == jiffies,
391 * or you set a timer to go off in the past
393 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
396 /* If the timeout is larger than MAX_TVAL (on 64-bit
397 * architectures or with CONFIG_BASE_SMALL=1) then we
398 * use the maximum timeout.
400 if (idx > MAX_TVAL) {
idx = MAX_TVAL;
402 expires = idx + base->timer_jiffies;
}
404 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
405 vec = base->tv5.vec + i;
410 list_add_tail(&timer->entry, vec);
413 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
415 (void)catchup_timer_jiffies(base);
416 __internal_add_timer(base, timer);
418 * Update base->active_timers and base->next_timer
420 if (!tbase_get_deferrable(timer->base)) {
421 if (!base->active_timers++ ||
422 time_before(timer->expires, base->next_timer))
423 base->next_timer = timer->expires;
428 * Check whether the other CPU is in dynticks mode and needs
429 * to be triggered to reevaluate the timer wheel.
430 * We are protected against the other CPU fiddling
431 * with the timer by holding the timer base lock. This also
432 * makes sure that a CPU on the way to stop its tick can not
433 * evaluate the timer wheel.
435 * Spare the IPI for deferrable timers on idle targets though.
436 * The next busy ticks will take care of it. Except that full dynticks targets
437 * require special care against races with idle_cpu(), so let's not skip the IPI for them.
440 if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(base->cpu))
441 wake_up_nohz_cpu(base->cpu);
444 #ifdef CONFIG_TIMER_STATS
445 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
447 if (timer->start_site)
450 timer->start_site = addr;
451 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
452 timer->start_pid = current->pid;
455 static void timer_stats_account_timer(struct timer_list *timer)
457 unsigned int flag = 0;
459 if (likely(!timer->start_site))
461 if (unlikely(tbase_get_deferrable(timer->base)))
462 flag |= TIMER_STATS_FLAG_DEFERRABLE;
464 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
465 timer->function, timer->start_comm, flag);
469 static void timer_stats_account_timer(struct timer_list *timer) {}
472 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
474 static struct debug_obj_descr timer_debug_descr;
476 static void *timer_debug_hint(void *addr)
478 return ((struct timer_list *) addr)->function;
482 * fixup_init is called when:
483 * - an active object is initialized
485 static int timer_fixup_init(void *addr, enum debug_obj_state state)
487 struct timer_list *timer = addr;
490 case ODEBUG_STATE_ACTIVE:
491 del_timer_sync(timer);
492 debug_object_init(timer, &timer_debug_descr);
499 /* Stub timer callback for improperly used timers. */
500 static void stub_timer(unsigned long data)
506 * fixup_activate is called when:
507 * - an active object is activated
508 * - an unknown object is activated (might be a statically initialized object)
510 static int timer_fixup_activate(void *addr, enum debug_obj_state state)
512 struct timer_list *timer = addr;
516 case ODEBUG_STATE_NOTAVAILABLE:
518 * This is not really a fixup. The timer was
519 * statically initialized. We just make sure that it
520 * is tracked in the object tracker.
522 if (timer->entry.next == NULL &&
523 timer->entry.prev == TIMER_ENTRY_STATIC) {
524 debug_object_init(timer, &timer_debug_descr);
525 debug_object_activate(timer, &timer_debug_descr);
528 setup_timer(timer, stub_timer, 0);
533 case ODEBUG_STATE_ACTIVE:
542 * fixup_free is called when:
543 * - an active object is freed
545 static int timer_fixup_free(void *addr, enum debug_obj_state state)
547 struct timer_list *timer = addr;
550 case ODEBUG_STATE_ACTIVE:
551 del_timer_sync(timer);
552 debug_object_free(timer, &timer_debug_descr);
560 * fixup_assert_init is called when:
561 * - an untracked/uninit-ed object is found
563 static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
565 struct timer_list *timer = addr;
568 case ODEBUG_STATE_NOTAVAILABLE:
569 if (timer->entry.prev == TIMER_ENTRY_STATIC) {
571 * This is not really a fixup. The timer was
572 * statically initialized. We just make sure that it
573 * is tracked in the object tracker.
575 debug_object_init(timer, &timer_debug_descr);
578 setup_timer(timer, stub_timer, 0);
586 static struct debug_obj_descr timer_debug_descr = {
587 .name = "timer_list",
588 .debug_hint = timer_debug_hint,
589 .fixup_init = timer_fixup_init,
590 .fixup_activate = timer_fixup_activate,
591 .fixup_free = timer_fixup_free,
592 .fixup_assert_init = timer_fixup_assert_init,
595 static inline void debug_timer_init(struct timer_list *timer)
597 debug_object_init(timer, &timer_debug_descr);
600 static inline void debug_timer_activate(struct timer_list *timer)
602 debug_object_activate(timer, &timer_debug_descr);
605 static inline void debug_timer_deactivate(struct timer_list *timer)
607 debug_object_deactivate(timer, &timer_debug_descr);
610 static inline void debug_timer_free(struct timer_list *timer)
612 debug_object_free(timer, &timer_debug_descr);
615 static inline void debug_timer_assert_init(struct timer_list *timer)
617 debug_object_assert_init(timer, &timer_debug_descr);
620 static void do_init_timer(struct timer_list *timer, unsigned int flags,
621 const char *name, struct lock_class_key *key);
623 void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
624 const char *name, struct lock_class_key *key)
626 debug_object_init_on_stack(timer, &timer_debug_descr);
627 do_init_timer(timer, flags, name, key);
629 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
631 void destroy_timer_on_stack(struct timer_list *timer)
633 debug_object_free(timer, &timer_debug_descr);
635 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
638 static inline void debug_timer_init(struct timer_list *timer) { }
639 static inline void debug_timer_activate(struct timer_list *timer) { }
640 static inline void debug_timer_deactivate(struct timer_list *timer) { }
641 static inline void debug_timer_assert_init(struct timer_list *timer) { }
644 static inline void debug_init(struct timer_list *timer)
646 debug_timer_init(timer);
647 trace_timer_init(timer);
651 debug_activate(struct timer_list *timer, unsigned long expires)
653 debug_timer_activate(timer);
654 trace_timer_start(timer, expires);
657 static inline void debug_deactivate(struct timer_list *timer)
659 debug_timer_deactivate(timer);
660 trace_timer_cancel(timer);
663 static inline void debug_assert_init(struct timer_list *timer)
665 debug_timer_assert_init(timer);
668 static void do_init_timer(struct timer_list *timer, unsigned int flags,
669 const char *name, struct lock_class_key *key)
671 struct tvec_base *base = raw_cpu_read(tvec_bases);
673 timer->entry.next = NULL;
674 timer->base = (void *)((unsigned long)base | flags);
676 #ifdef CONFIG_TIMER_STATS
677 timer->start_site = NULL;
678 timer->start_pid = -1;
679 memset(timer->start_comm, 0, TASK_COMM_LEN);
681 lockdep_init_map(&timer->lockdep_map, name, key, 0);
685 * init_timer_key - initialize a timer
686 * @timer: the timer to be initialized
687 * @flags: timer flags
688 * @name: name of the timer
689 * @key: lockdep class key of the fake lock used for tracking timer
690 * sync lock dependencies
692 * init_timer_key() must be done to a timer prior to calling *any* of the
693 * other timer functions.
695 void init_timer_key(struct timer_list *timer, unsigned int flags,
696 const char *name, struct lock_class_key *key)
699 do_init_timer(timer, flags, name, key);
701 EXPORT_SYMBOL(init_timer_key);
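/*
 * Example (illustrative sketch; my_dev, my_dev_timeout and the 500ms value
 * are made-up names, not part of this subsystem): most callers reach
 * init_timer_key() through the init_timer()/setup_timer() wrappers in
 * <linux/timer.h>, which supply the lockdep class key:
 *
 *	static void my_dev_timeout(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		dev->timed_out = true;
 *	}
 *
 *	setup_timer(&dev->watchdog, my_dev_timeout, (unsigned long)dev);
 *	mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
 */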
703 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
705 struct list_head *entry = &timer->entry;
707 debug_deactivate(timer);
709 __list_del(entry->prev, entry->next);
712 entry->prev = LIST_POISON2;
716 detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
718 detach_timer(timer, true);
719 if (!tbase_get_deferrable(timer->base))
720 base->active_timers--;
722 (void)catchup_timer_jiffies(base);
725 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
728 if (!timer_pending(timer))
731 detach_timer(timer, clear_pending);
732 if (!tbase_get_deferrable(timer->base)) {
733 base->active_timers--;
734 if (timer->expires == base->next_timer)
735 base->next_timer = base->timer_jiffies;
738 (void)catchup_timer_jiffies(base);
743 * We are using hashed locking: holding per_cpu(tvec_bases).lock
744 * means that all timers which are tied to this base via timer->base are
745 * locked, and the base itself is locked too.
747 * So __run_timers/migrate_timers can safely modify all timers which could
748 * be found on ->tvX lists.
750 * When the timer's base is locked, and the timer removed from list, it is
751 * possible to set timer->base = NULL and drop the lock: the timer remains locked.
754 static struct tvec_base *lock_timer_base(struct timer_list *timer,
755 unsigned long *flags)
756 __acquires(timer->base->lock)
758 struct tvec_base *base;
761 struct tvec_base *prelock_base = timer->base;
762 base = tbase_get_base(prelock_base);
763 if (likely(base != NULL)) {
764 spin_lock_irqsave(&base->lock, *flags);
765 if (likely(prelock_base == timer->base))
767 /* The timer has migrated to another CPU */
768 spin_unlock_irqrestore(&base->lock, *flags);
774 #ifndef CONFIG_PREEMPT_RT_FULL
775 static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
776 struct tvec_base *old,
777 struct tvec_base *new)
779 /* See the comment in lock_timer_base() */
780 timer_set_base(timer, NULL);
781 spin_unlock(&old->lock);
782 spin_lock(&new->lock);
783 timer_set_base(timer, new);
787 static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
788 struct tvec_base *old,
789 struct tvec_base *new)
792 * We cannot do the above because we might be preempted and
793 * then the preempter would see NULL and loop forever.
795 if (spin_trylock(&new->lock)) {
796 timer_set_base(timer, new);
797 spin_unlock(&old->lock);
805 __mod_timer(struct timer_list *timer, unsigned long expires,
806 bool pending_only, int pinned)
808 struct tvec_base *base, *new_base;
812 timer_stats_timer_set_start_info(timer);
813 BUG_ON(!timer->function);
815 base = lock_timer_base(timer, &flags);
817 ret = detach_if_pending(timer, base, false);
818 if (!ret && pending_only)
821 debug_activate(timer, expires);
823 cpu = get_nohz_timer_target(pinned);
824 new_base = per_cpu(tvec_bases, cpu);
826 if (base != new_base) {
828 * We are trying to schedule the timer on the local CPU.
829 * However we can't change timer's base while it is running,
830 * otherwise del_timer_sync() can't detect that the timer's
831 * handler has not yet finished. This also guarantees that
832 * the timer is serialized wrt itself.
834 if (likely(base->running_timer != timer))
835 base = switch_timer_base(timer, base, new_base);
838 timer->expires = expires;
839 internal_add_timer(base, timer);
842 spin_unlock_irqrestore(&base->lock, flags);
848 * mod_timer_pending - modify a pending timer's timeout
849 * @timer: the pending timer to be modified
850 * @expires: new timeout in jiffies
852 * mod_timer_pending() is the same for pending timers as mod_timer(),
853 * but will not re-activate and modify already deleted timers.
855 * It is useful for unserialized use of timers.
857 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
859 return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
861 EXPORT_SYMBOL(mod_timer_pending);
864 * Decide where to put the timer while taking the slack into account
867 * 1) calculate the maximum (absolute) time
868 * 2) calculate the highest bit where the expires and new max are different
869 * 3) use this bit to make a mask
870 * 4) use the bitmask to round down the maximum time, so that all last bits are the same
874 unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
876 unsigned long expires_limit, mask;
879 if (timer->slack >= 0) {
880 expires_limit = expires + timer->slack;
882 long delta = expires - jiffies;
887 expires_limit = expires + delta / 256;
889 mask = expires ^ expires_limit;
893 bit = find_last_bit(&mask, BITS_PER_LONG);
895 mask = (1UL << bit) - 1;
897 expires_limit = expires_limit & ~(mask);
899 return expires_limit;
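/*
 * Worked example (for illustration only): with slack left at the default
 * of -1, jiffies == 0 and a requested expiry of 10000, delta is 10000 so
 * expires_limit becomes 10000 + 10000/256 = 10039.  10000 ^ 10039 = 39,
 * whose highest set bit is bit 5, so mask = 31 and 10039 rounds down to
 * 10016.  The timer may thus fire anywhere in [10000, 10039] and is placed
 * at 10016, a multiple of 32 that nearby timers can share.
 */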
903 * mod_timer - modify a timer's timeout
904 * @timer: the timer to be modified
905 * @expires: new timeout in jiffies
907 * mod_timer() is a more efficient way to update the expire field of an
908 * active timer (if the timer is inactive it will be activated)
910 * mod_timer(timer, expires) is equivalent to:
912 * del_timer(timer); timer->expires = expires; add_timer(timer);
914 * Note that if there are multiple unserialized concurrent users of the
915 * same timer, then mod_timer() is the only safe way to modify the timeout,
916 * since add_timer() cannot modify an already running timer.
918 * The function returns whether it has modified a pending timer or not.
919 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
920 * active timer returns 1.)
922 int mod_timer(struct timer_list *timer, unsigned long expires)
924 expires = apply_slack(timer, expires);
927 * This is a common optimization triggered by the
928 * networking code - if the timer is re-modified
929 * to be the same thing then just return:
931 if (timer_pending(timer) && timer->expires == expires)
934 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
936 EXPORT_SYMBOL(mod_timer);
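/*
 * Example (illustrative sketch; my_dev, my_poll and the 100ms period are
 * made-up names): the common pattern is a callback that re-arms itself
 * with mod_timer():
 *
 *	static void my_poll(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		my_dev_poll_hardware(dev);
 *		mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(100));
 *	}
 */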
939 * mod_timer_pinned - modify a timer's timeout
940 * @timer: the timer to be modified
941 * @expires: new timeout in jiffies
943 * mod_timer_pinned() is a way to update the expire field of an
944 * active timer (if the timer is inactive it will be activated)
945 * and to ensure that the timer is scheduled on the current CPU.
947 * Note that this does not prevent the timer from being migrated
948 * when the current CPU goes offline. If this is a problem for
949 * you, use CPU-hotplug notifiers to handle it correctly, for
950 * example, cancelling the timer when the corresponding CPU goes
953 * mod_timer_pinned(timer, expires) is equivalent to:
955 * del_timer(timer); timer->expires = expires; add_timer(timer);
957 int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
959 if (timer->expires == expires && timer_pending(timer))
962 return __mod_timer(timer, expires, false, TIMER_PINNED);
964 EXPORT_SYMBOL(mod_timer_pinned);
967 * add_timer - start a timer
968 * @timer: the timer to be added
970 * The kernel will do a ->function(->data) callback from the
971 * timer interrupt at the ->expires point in the future. The
972 * current time is 'jiffies'.
974 * The timer's ->expires, ->function (and if the handler uses it, ->data)
975 * fields must be set prior to calling this function.
977 * Timers with an ->expires field in the past will be executed in the next timer tick.
980 void add_timer(struct timer_list *timer)
982 BUG_ON(timer_pending(timer));
983 mod_timer(timer, timer->expires);
985 EXPORT_SYMBOL(add_timer);
988 * add_timer_on - start a timer on a particular CPU
989 * @timer: the timer to be added
990 * @cpu: the CPU to start it on
992 * This is not very scalable on SMP. Double adds are not possible.
994 void add_timer_on(struct timer_list *timer, int cpu)
996 struct tvec_base *base = per_cpu(tvec_bases, cpu);
999 timer_stats_timer_set_start_info(timer);
1000 BUG_ON(timer_pending(timer) || !timer->function);
1001 spin_lock_irqsave(&base->lock, flags);
1002 timer_set_base(timer, base);
1003 debug_activate(timer, timer->expires);
1004 internal_add_timer(base, timer);
1005 spin_unlock_irqrestore(&base->lock, flags);
1007 EXPORT_SYMBOL_GPL(add_timer_on);
1009 #ifdef CONFIG_PREEMPT_RT_FULL
1011 * Wait for a running timer
1013 static void wait_for_running_timer(struct timer_list *timer)
1015 struct tvec_base *base = timer->base;
1017 if (base->running_timer == timer)
1018 wait_event(base->wait_for_running_timer,
1019 base->running_timer != timer);
1022 # define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
1024 static inline void wait_for_running_timer(struct timer_list *timer)
1029 # define wakeup_timer_waiters(b) do { } while (0)
1033 * del_timer - deactivate a timer.
1034 * @timer: the timer to be deactivated
1036 * del_timer() deactivates a timer - this works on both active and inactive timers.
1039 * The function returns whether it has deactivated a pending timer or not.
1040 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
1041 * active timer returns 1.)
1043 int del_timer(struct timer_list *timer)
1045 struct tvec_base *base;
1046 unsigned long flags;
1049 debug_assert_init(timer);
1051 timer_stats_timer_clear_start_info(timer);
1052 if (timer_pending(timer)) {
1053 base = lock_timer_base(timer, &flags);
1054 ret = detach_if_pending(timer, base, true);
1055 spin_unlock_irqrestore(&base->lock, flags);
1060 EXPORT_SYMBOL(del_timer);
1063 * try_to_del_timer_sync - Try to deactivate a timer
1064 * @timer: timer to deactivate
1066 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1067 * exit the timer is not queued and the handler is not running on any CPU.
1069 int try_to_del_timer_sync(struct timer_list *timer)
1071 struct tvec_base *base;
1072 unsigned long flags;
1075 debug_assert_init(timer);
1077 base = lock_timer_base(timer, &flags);
1079 if (base->running_timer != timer) {
1080 timer_stats_timer_clear_start_info(timer);
1081 ret = detach_if_pending(timer, base, true);
1083 spin_unlock_irqrestore(&base->lock, flags);
1087 EXPORT_SYMBOL(try_to_del_timer_sync);
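/*
 * Example (illustrative sketch; dev and its lock are made-up names): a
 * caller that holds a lock which the timer callback also takes cannot use
 * del_timer_sync(); one way out is to retry try_to_del_timer_sync() and
 * drop the lock between attempts so the running callback can finish:
 *
 *	while (try_to_del_timer_sync(&dev->timer) < 0) {
 *		spin_unlock_irq(&dev->lock);
 *		cpu_relax();
 *		spin_lock_irq(&dev->lock);
 *	}
 */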
1089 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
1090 static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
1093 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1094 * @timer: the timer to be deactivated
1096 * This function only differs from del_timer() on SMP: besides deactivating
1097 * the timer it also makes sure the handler has finished executing on other CPUs.
1100 * Synchronization rules: Callers must prevent restarting of the timer,
1101 * otherwise this function is meaningless. It must not be called from
1102 * interrupt contexts unless the timer is an irqsafe one. The caller must
1103 * not hold locks which would prevent completion of the timer's
1104 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1105 * timer is not queued and the handler is not running on any CPU.
1107 * Note: For !irqsafe timers, you must not hold locks that are held in
1108 * interrupt context while calling this function. Even if the lock has
1109 * nothing to do with the timer in question. Here's why:
1115 *   CPU1 (timer softirq, in call_timer_fn):   base->running_timer = mytimer;
1116 *   CPU0:                                     spin_lock_irq(somelock);
1118 *   CPU1 (hardirq interrupting that softirq): spin_lock(somelock);
1119 *   CPU0:                                     del_timer_sync(mytimer);
1120 *   CPU0 (spinning inside del_timer_sync):    while (base->running_timer == mytimer);
1122 * Now del_timer_sync() will never return and never release somelock.
1123 * The interrupt on the other CPU is waiting to grab somelock but
1124 * it has interrupted the softirq that CPU0 is waiting to finish.
1126 * The function returns whether it has deactivated a pending timer or not.
1128 int del_timer_sync(struct timer_list *timer)
1130 #ifdef CONFIG_LOCKDEP
1131 unsigned long flags;
1134 * If lockdep gives a backtrace here, please reference
1135 * the synchronization rules above.
1137 local_irq_save(flags);
1138 lock_map_acquire(&timer->lockdep_map);
1139 lock_map_release(&timer->lockdep_map);
1140 local_irq_restore(flags);
1143 * don't use it in hardirq context, because it
1144 * could lead to deadlock.
1146 WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
1148 int ret = try_to_del_timer_sync(timer);
1151 wait_for_running_timer(timer);
1154 EXPORT_SYMBOL(del_timer_sync);
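/*
 * Example (illustrative sketch; the names are made up): the usual teardown
 * order is to make sure nothing can re-arm the timer (here a flag the
 * callback checks before re-arming), then synchronize, and only then free
 * the object embedding it:
 *
 *	dev->shutting_down = true;
 *	del_timer_sync(&dev->poll_timer);
 *	kfree(dev);
 */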
1157 static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1159 /* cascade all the timers from tv up one level */
1160 struct timer_list *timer, *tmp;
1161 struct list_head tv_list;
1163 list_replace_init(tv->vec + index, &tv_list);
1166 * We are removing _all_ timers from the list, so we
1167 * don't have to detach them individually.
1169 list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1170 BUG_ON(tbase_get_base(timer->base) != base);
1171 /* No accounting, while moving them */
1172 __internal_add_timer(base, timer);
1178 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1181 int count = preempt_count();
1183 #ifdef CONFIG_LOCKDEP
1185 * It is permissible to free the timer from inside the
1186 * function that is called from it; we need to take this into
1187 * account for lockdep too. To avoid bogus "held lock freed"
1188 * warnings as well as problems when looking into
1189 * timer->lockdep_map, make a copy and use that here.
1191 struct lockdep_map lockdep_map;
1193 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1196 * Couple the lock chain with the lock chain at
1197 * del_timer_sync() by acquiring the lock_map around the fn()
1198 * call here and in del_timer_sync().
1200 lock_map_acquire(&lockdep_map);
1202 trace_timer_expire_entry(timer);
1204 trace_timer_expire_exit(timer);
1206 lock_map_release(&lockdep_map);
1208 if (count != preempt_count()) {
1209 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1210 fn, count, preempt_count());
1212 * Restore the preempt count. That gives us a decent
1213 * chance to survive and extract information. If the
1214 * callback kept a lock held, bad luck, but not worse
1215 * than the BUG() we had.
1217 preempt_count_set(count);
1221 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1224 * __run_timers - run all expired timers (if any) on this CPU.
1225 * @base: the timer vector to be processed.
1227 * This function cascades all vectors and executes all expired timer vectors.
1230 static inline void __run_timers(struct tvec_base *base)
1232 struct timer_list *timer;
1234 spin_lock_irq(&base->lock);
1235 if (catchup_timer_jiffies(base)) {
1236 spin_unlock_irq(&base->lock);
1239 while (time_after_eq(jiffies, base->timer_jiffies)) {
1240 struct list_head work_list;
1241 struct list_head *head = &work_list;
1242 int index = base->timer_jiffies & TVR_MASK;
if (!index &&
1248 (!cascade(base, &base->tv2, INDEX(0))) &&
1249 (!cascade(base, &base->tv3, INDEX(1))) &&
1250 !cascade(base, &base->tv4, INDEX(2)))
1251 cascade(base, &base->tv5, INDEX(3));
1252 ++base->timer_jiffies;
1253 list_replace_init(base->tv1.vec + index, head);
1254 while (!list_empty(head)) {
1255 void (*fn)(unsigned long);
1259 timer = list_first_entry(head, struct timer_list,entry);
1260 fn = timer->function;
1262 irqsafe = tbase_get_irqsafe(timer->base);
1264 timer_stats_account_timer(timer);
1266 base->running_timer = timer;
1267 detach_expired_timer(timer, base);
1270 spin_unlock(&base->lock);
1271 call_timer_fn(timer, fn, data);
1272 base->running_timer = NULL;
1273 spin_lock(&base->lock);
1275 spin_unlock_irq(&base->lock);
1276 call_timer_fn(timer, fn, data);
1277 base->running_timer = NULL;
1278 spin_lock_irq(&base->lock);
1282 wakeup_timer_waiters(base);
1283 spin_unlock_irq(&base->lock);
1286 #ifdef CONFIG_NO_HZ_COMMON
1288 * Find out when the next timer event is due to happen. This
1289 * is used on S/390 to stop all activity when a CPU is idle.
1290 * This function needs to be called with interrupts disabled.
1292 static unsigned long __next_timer_interrupt(struct tvec_base *base)
1294 unsigned long timer_jiffies = base->timer_jiffies;
1295 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1296 int index, slot, array, found = 0;
1297 struct timer_list *nte;
1298 struct tvec *varray[4];
1300 /* Look for timer events in tv1. */
1301 index = slot = timer_jiffies & TVR_MASK;
1303 list_for_each_entry(nte, base->tv1.vec + slot, entry) {
1304 if (tbase_get_deferrable(nte->base))
1308 expires = nte->expires;
1309 /* Look at the cascade bucket(s)? */
1310 if (!index || slot < index)
1314 slot = (slot + 1) & TVR_MASK;
1315 } while (slot != index);
1318 /* Calculate the next cascade event */
1320 timer_jiffies += TVR_SIZE - index;
1321 timer_jiffies >>= TVR_BITS;
1323 /* Check tv2-tv5. */
1324 varray[0] = &base->tv2;
1325 varray[1] = &base->tv3;
1326 varray[2] = &base->tv4;
1327 varray[3] = &base->tv5;
1329 for (array = 0; array < 4; array++) {
1330 struct tvec *varp = varray[array];
1332 index = slot = timer_jiffies & TVN_MASK;
1334 list_for_each_entry(nte, varp->vec + slot, entry) {
1335 if (tbase_get_deferrable(nte->base))
1339 if (time_before(nte->expires, expires))
1340 expires = nte->expires;
1343 * Do we still search for the first timer or are
1344 * we looking up the cascade buckets?
1347 /* Look at the cascade bucket(s)? */
1348 if (!index || slot < index)
1352 slot = (slot + 1) & TVN_MASK;
1353 } while (slot != index);
1356 timer_jiffies += TVN_SIZE - index;
1357 timer_jiffies >>= TVN_BITS;
1363 * Check if the next hrtimer event is before the next timer wheel event:
1366 static unsigned long cmp_next_hrtimer_event(unsigned long now,
1367 unsigned long expires)
1369 ktime_t hr_delta = hrtimer_get_next_event();
1370 struct timespec tsdelta;
1371 unsigned long delta;
1373 if (hr_delta.tv64 == KTIME_MAX)
1377 * Expired timer available, let it expire in the next tick
1379 if (hr_delta.tv64 <= 0)
1382 tsdelta = ktime_to_timespec(hr_delta);
1383 delta = timespec_to_jiffies(&tsdelta);
1386 * Limit the delta to the max value, which is checked in
1387 * tick_nohz_stop_sched_tick():
1389 if (delta > NEXT_TIMER_MAX_DELTA)
1390 delta = NEXT_TIMER_MAX_DELTA;
1393 * Take rounding errors into account and make sure that it
1394 * expires in the next tick. Otherwise we go into an endless
1395 * ping pong due to tick_nohz_stop_sched_tick() retriggering the timer softirq.
1401 if (time_before(now, expires))
1407 * get_next_timer_interrupt - return the jiffy of the next pending timer
1408 * @now: current time (in jiffies)
1410 unsigned long get_next_timer_interrupt(unsigned long now)
1412 struct tvec_base *base = __this_cpu_read(tvec_bases);
1413 unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
1416 * Pretend that there is no timer pending if the cpu is offline.
1417 * Possible pending timers will be migrated later to an active cpu.
1419 if (cpu_is_offline(smp_processor_id()))
1422 #ifdef CONFIG_PREEMPT_RT_FULL
1424 * On PREEMPT_RT we cannot sleep here. As a result we can't take
1425 * the base lock to check when the next timer is pending and so
1426 * we assume the next jiffy.
1430 spin_lock(&base->lock);
1431 if (base->active_timers) {
1432 if (time_before_eq(base->next_timer, base->timer_jiffies))
1433 base->next_timer = __next_timer_interrupt(base);
1434 expires = base->next_timer;
1436 spin_unlock(&base->lock);
1438 if (time_before_eq(expires, now))
1441 return cmp_next_hrtimer_event(now, expires);
1446 * Called from the timer interrupt handler to charge one tick to the current
1447 * process. user_tick is 1 if the tick is user time, 0 for system.
1449 void update_process_times(int user_tick)
1451 struct task_struct *p = current;
1453 /* Note: this timer irq context must be accounted for as well. */
1454 account_process_tick(p, user_tick);
1457 rcu_check_callbacks(user_tick);
1458 #if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
1462 run_posix_cpu_timers(p);
1466 * This function runs timers and the timer-tq in bottom half context.
1468 static void run_timer_softirq(struct softirq_action *h)
1470 struct tvec_base *base = __this_cpu_read(tvec_bases);
1472 hrtimer_run_pending();
1474 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
1478 if (time_after_eq(jiffies, base->timer_jiffies))
1483 * Called by the local, per-CPU timer interrupt on SMP.
1485 void run_local_timers(void)
1487 hrtimer_run_queues();
1488 raise_softirq(TIMER_SOFTIRQ);
1491 #ifdef __ARCH_WANT_SYS_ALARM
1494 * For backwards compatibility? This can be done in libc so Alpha
1495 * and all newer ports shouldn't need it.
1497 SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1499 return alarm_setitimer(seconds);
1504 static void process_timeout(unsigned long __data)
1506 wake_up_process((struct task_struct *)__data);
1510 * schedule_timeout - sleep until timeout
1511 * @timeout: timeout value in jiffies
1513 * Make the current task sleep until @timeout jiffies have
1514 * elapsed. The routine will return immediately unless
1515 * the current task state has been set (see set_current_state()).
1517 * You can set the task state as follows -
1519 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1520 * pass before the routine returns. The routine will return 0
1522 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1523 * delivered to the current task. In this case the remaining time
1524 * in jiffies will be returned, or 0 if the timer expired in time
1526 * The current task state is guaranteed to be TASK_RUNNING when this routine returns.
1529 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1530 * the CPU away without a bound on the timeout. In this case the return
1531 * value will be %MAX_SCHEDULE_TIMEOUT.
1533 * In all cases the return value is guaranteed to be non-negative.
1535 signed long __sched schedule_timeout(signed long timeout)
1537 struct timer_list timer;
1538 unsigned long expire;
1542 case MAX_SCHEDULE_TIMEOUT:
1544 * These two special cases are useful to be comfortable
1545 * in the caller. Nothing more. We could take
1546 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1547 * but I'd like to return a valid offset (>= 0) to allow
1548 * the caller to do everything it wants with the retval.
1554 * Another bit of PARANOID. Note that the retval will be
1555 * 0 since no piece of kernel is supposed to do a check
1556 * for a negative retval of schedule_timeout() (since it
1557 * should never happen anyway). You just have the printk()
1558 * that will tell you if something has gone wrong and where.
1561 printk(KERN_ERR "schedule_timeout: wrong timeout "
1562 "value %lx\n", timeout);
1564 current->state = TASK_RUNNING;
1569 expire = timeout + jiffies;
1571 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1572 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1574 del_singleshot_timer_sync(&timer);
1576 /* Remove the timer from the object tracker */
1577 destroy_timer_on_stack(&timer);
1579 timeout = expire - jiffies;
1582 return timeout < 0 ? 0 : timeout;
1584 EXPORT_SYMBOL(schedule_timeout);
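/*
 * Example (illustrative sketch): the task state must be set immediately
 * before calling schedule_timeout(); to wait for up to one second while
 * remaining signal-aware:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * where a non-zero "remaining" means the sleep ended early (here, because
 * a signal was delivered).
 */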
1587 * We can use __set_current_state() here because schedule_timeout() calls
1588 * schedule() unconditionally.
1590 signed long __sched schedule_timeout_interruptible(signed long timeout)
1592 __set_current_state(TASK_INTERRUPTIBLE);
1593 return schedule_timeout(timeout);
1595 EXPORT_SYMBOL(schedule_timeout_interruptible);
1597 signed long __sched schedule_timeout_killable(signed long timeout)
1599 __set_current_state(TASK_KILLABLE);
1600 return schedule_timeout(timeout);
1602 EXPORT_SYMBOL(schedule_timeout_killable);
1604 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1606 __set_current_state(TASK_UNINTERRUPTIBLE);
1607 return schedule_timeout(timeout);
1609 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1611 #ifdef CONFIG_HOTPLUG_CPU
1612 static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
1614 struct timer_list *timer;
1616 while (!list_empty(head)) {
1617 timer = list_first_entry(head, struct timer_list, entry);
1618 /* We ignore the accounting on the dying cpu */
1619 detach_timer(timer, false);
1620 timer_set_base(timer, new_base);
1621 internal_add_timer(new_base, timer);
1625 static void migrate_timers(int cpu)
1627 struct tvec_base *old_base;
1628 struct tvec_base *new_base;
1631 BUG_ON(cpu_online(cpu));
1632 old_base = per_cpu(tvec_bases, cpu);
1633 new_base = get_local_var(tvec_bases);
1635 * The caller is globally serialized and nobody else
1636 * takes two locks at once, so deadlock is not possible.
1638 spin_lock_irq(&new_base->lock);
1639 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1641 BUG_ON(old_base->running_timer);
1643 for (i = 0; i < TVR_SIZE; i++)
1644 migrate_timer_list(new_base, old_base->tv1.vec + i);
1645 for (i = 0; i < TVN_SIZE; i++) {
1646 migrate_timer_list(new_base, old_base->tv2.vec + i);
1647 migrate_timer_list(new_base, old_base->tv3.vec + i);
1648 migrate_timer_list(new_base, old_base->tv4.vec + i);
1649 migrate_timer_list(new_base, old_base->tv5.vec + i);
1652 old_base->active_timers = 0;
1653 old_base->all_timers = 0;
1655 spin_unlock(&old_base->lock);
1656 spin_unlock_irq(&new_base->lock);
1657 put_local_var(tvec_bases);
1660 static int timer_cpu_notify(struct notifier_block *self,
1661 unsigned long action, void *hcpu)
1665 case CPU_DEAD_FROZEN:
1666 migrate_timers((long)hcpu);
1675 static inline void timer_register_cpu_notifier(void)
1677 cpu_notifier(timer_cpu_notify, 0);
1680 static inline void timer_register_cpu_notifier(void) { }
1681 #endif /* CONFIG_HOTPLUG_CPU */
1683 static void __init init_timer_cpu(struct tvec_base *base, int cpu)
1687 BUG_ON(base != tbase_get_base(base));
1690 per_cpu(tvec_bases, cpu) = base;
1691 spin_lock_init(&base->lock);
1692 #ifdef CONFIG_PREEMPT_RT_FULL
1693 init_waitqueue_head(&base->wait_for_running_timer);
1696 for (j = 0; j < TVN_SIZE; j++) {
1697 INIT_LIST_HEAD(base->tv5.vec + j);
1698 INIT_LIST_HEAD(base->tv4.vec + j);
1699 INIT_LIST_HEAD(base->tv3.vec + j);
1700 INIT_LIST_HEAD(base->tv2.vec + j);
1702 for (j = 0; j < TVR_SIZE; j++)
1703 INIT_LIST_HEAD(base->tv1.vec + j);
1705 base->timer_jiffies = jiffies;
1706 base->next_timer = base->timer_jiffies;
1709 static void __init init_timer_cpus(void)
1711 struct tvec_base *base;
1712 int local_cpu = smp_processor_id();
1715 for_each_possible_cpu(cpu) {
1716 if (cpu == local_cpu)
1717 base = &boot_tvec_bases;
1720 base = per_cpu_ptr(&__tvec_bases, cpu);
1723 init_timer_cpu(base, cpu);
1727 void __init init_timers(void)
1729 /* ensure there are enough low bits for flags in timer->base pointer */
1730 BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
1734 timer_register_cpu_notifier();
1735 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1739 * msleep - sleep safely even with waitqueue interruptions
1740 * @msecs: Time in milliseconds to sleep for
1742 void msleep(unsigned int msecs)
1744 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1747 timeout = schedule_timeout_uninterruptible(timeout);
1750 EXPORT_SYMBOL(msleep);
1753 * msleep_interruptible - sleep waiting for signals
1754 * @msecs: Time in milliseconds to sleep for
1756 unsigned long msleep_interruptible(unsigned int msecs)
1758 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1760 while (timeout && !signal_pending(current))
1761 timeout = schedule_timeout_interruptible(timeout);
1762 return jiffies_to_msecs(timeout);
1765 EXPORT_SYMBOL(msleep_interruptible);
1767 static int __sched do_usleep_range(unsigned long min, unsigned long max)
1770 unsigned long delta;
1772 kmin = ktime_set(0, min * NSEC_PER_USEC);
1773 delta = (max - min) * NSEC_PER_USEC;
1774 return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1778 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1779 * @min: Minimum time in usecs to sleep
1780 * @max: Maximum time in usecs to sleep
1782 void usleep_range(unsigned long min, unsigned long max)
1784 __set_current_state(TASK_UNINTERRUPTIBLE);
1785 do_usleep_range(min, max);
1787 EXPORT_SYMBOL(usleep_range);
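/*
 * Example (illustrative sketch): a driver that must wait at least 100us
 * but can tolerate up to 200us gives the scheduler that window so the
 * wakeup can be coalesced with others:
 *
 *	usleep_range(100, 200);
 */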