2 * Implement CPU time clocks for the POSIX clock interface.
5 #include <linux/sched.h>
6 #include <linux/sched/rt.h>
7 #include <linux/posix-timers.h>
8 #include <linux/errno.h>
9 #include <linux/math64.h>
10 #include <asm/uaccess.h>
11 #include <linux/kernel_stat.h>
12 #include <trace/events/timer.h>
13 #include <linux/random.h>
14 #include <linux/tick.h>
15 #include <linux/workqueue.h>
18 * Called after updating RLIMIT_CPU to run cpu timer and update
19 * tsk->signal->cputime_expires expiration cache if necessary. Needs
20 * siglock protection since other code may update expiration cache as well.
23 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
25 cputime_t cputime = secs_to_cputime(rlim_new);
27 spin_lock_irq(&task->sighand->siglock);
28 set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
29 spin_unlock_irq(&task->sighand->siglock);
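/*
 * Editor's note: a hedged userspace sketch (not part of this file) of the
 * usual path into update_rlimit_cpu(): do_prlimit() calls it when
 * RLIMIT_CPU is set to a finite value. The helper name is hypothetical.
 */
#include <sys/resource.h>

static int cap_cpu_seconds(rlim_t secs)
{
	struct rlimit rl = { .rlim_cur = secs, .rlim_max = RLIM_INFINITY };

	/* Delivers SIGXCPU once the process has used secs of CPU time. */
	return setrlimit(RLIMIT_CPU, &rl);
}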
32 static int check_clock(const clockid_t which_clock)
35 struct task_struct *p;
36 const pid_t pid = CPUCLOCK_PID(which_clock);
38 if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
45 p = find_task_by_vpid(pid);
46 if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
47 same_thread_group(p, current) : has_group_leader_pid(p))) {
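/*
 * Editor's note: for reference, the clockid_t encoding assumed throughout
 * this file (from <linux/posix-timers.h>): bits 0-1 select PROF/VIRT/SCHED
 * (CPUCLOCK_WHICH), bit 2 flags a per-thread clock (CPUCLOCK_PERTHREAD),
 * and the remaining high bits hold the ones' complement of the pid, so
 * CPUCLOCK_PID() == 0 means "the caller itself".
 */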
55 static inline unsigned long long
56 timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
58 unsigned long long ret;
60 ret = 0; /* high half always zero when .cpu used */
61 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
62 ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
64 ret = cputime_to_expires(timespec_to_cputime(tp));
69 static void sample_to_timespec(const clockid_t which_clock,
70 unsigned long long expires,
73 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
74 *tp = ns_to_timespec(expires);
76 cputime_to_timespec((__force cputime_t)expires, tp);
80 * Update expiry time from increment, and increase overrun count,
81 * given the current clock sample.
83 static void bump_cpu_timer(struct k_itimer *timer,
84 unsigned long long now)
87 unsigned long long delta, incr;
89 if (timer->it.cpu.incr == 0)
92 if (now < timer->it.cpu.expires)
95 incr = timer->it.cpu.incr;
96 delta = now + incr - timer->it.cpu.expires;
98 /* Don't use (incr*2 < delta), incr*2 might overflow. */
99 for (i = 0; incr < delta - incr; i++)
102 for (; i >= 0; incr >>= 1, i--) {
106 timer->it.cpu.expires += incr;
107 timer->it_overrun += 1 << i;
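/*
 * Editor's note: a minimal userspace sketch of the doubling arithmetic in
 * bump_cpu_timer() above, assuming plain 64-bit values; bump_demo() is a
 * hypothetical name. It advances *expires past now in O(log) steps and
 * returns the overrun count accumulated on the way.
 */
#include <stdint.h>

static uint64_t bump_demo(uint64_t *expires, uint64_t incr, uint64_t now)
{
	uint64_t delta, step = incr, overrun = 0;
	int i;

	if (!incr || now < *expires)
		return 0;

	delta = now + incr - *expires;
	/* Double the step until one more doubling would pass delta. */
	for (i = 0; step < delta - step; i++)
		step <<= 1;
	/* Walk back down, taking every step that still fits in delta. */
	for (; i >= 0; step >>= 1, i--) {
		if (delta < step)
			continue;
		*expires += step;
		delta -= step;
		overrun += 1ULL << i;
	}
	return overrun;
}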
113 * task_cputime_zero - Check a task_cputime struct for all zero fields.
115 * @cputime: The struct to compare.
117 * Checks @cputime to see if all fields are zero. Returns true if all fields
118 * are zero, false if any field is nonzero.
120 static inline int task_cputime_zero(const struct task_cputime *cputime)
122 if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
127 static inline unsigned long long prof_ticks(struct task_struct *p)
129 cputime_t utime, stime;
131 task_cputime(p, &utime, &stime);
133 return cputime_to_expires(utime + stime);
135 static inline unsigned long long virt_ticks(struct task_struct *p)
139 task_cputime(p, &utime, NULL);
141 return cputime_to_expires(utime);
145 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
147 int error = check_clock(which_clock);
150 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
151 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
153 * If sched_clock is using a cycle counter, we
154 * don't have any idea of its true resolution
155 * exported, but it is much more than 1s/HZ.
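/*
 * Editor's note: hedged userspace view of the resolution reported above;
 * for the tick-based clocks this comes out as 1s/HZ (e.g. 4000000 ns at
 * HZ=250). The helper name is hypothetical.
 */
#include <stdio.h>
#include <time.h>

static void show_cpu_clock_res(void)
{
	struct timespec res;

	if (clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res) == 0)
		printf("CPU clock resolution: %ld ns\n", res.tv_nsec);
}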
164 posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
167 * You can never reset a CPU clock, but we check for other errors
168 * in the call before failing with EPERM.
170 int error = check_clock(which_clock);
179 * Sample a per-thread clock for the given task.
181 static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
182 unsigned long long *sample)
184 switch (CPUCLOCK_WHICH(which_clock)) {
188 *sample = prof_ticks(p);
191 *sample = virt_ticks(p);
194 *sample = task_sched_runtime(p);
201 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
202 * to avoid race conditions with concurrent updates to cputime.
204 static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
208 curr_cputime = atomic64_read(cputime);
209 if (sum_cputime > curr_cputime) {
210 if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
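/*
 * Editor's note: the same "monotonic max" idea in portable C11 atomics
 * (userspace sketch, hypothetical helper). A failed compare-exchange
 * refreshes `old`, so the loop retries much like the kernel version,
 * which retries via a goto that is elided in this listing.
 */
#include <stdatomic.h>
#include <stdint.h>

static void update_gt(_Atomic uint64_t *cur, uint64_t sum)
{
	uint64_t old = atomic_load(cur);

	while (sum > old &&
	       !atomic_compare_exchange_weak(cur, &old, sum))
		;	/* old now holds the fresher value; re-test */
}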
215 static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
217 __update_gt_cputime(&cputime_atomic->utime, sum->utime);
218 __update_gt_cputime(&cputime_atomic->stime, sum->stime);
219 __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
222 /* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
223 static inline void sample_cputime_atomic(struct task_cputime *times,
224 struct task_cputime_atomic *atomic_times)
226 times->utime = atomic64_read(&atomic_times->utime);
227 times->stime = atomic64_read(&atomic_times->stime);
228 times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
231 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
233 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
234 struct task_cputime sum;
236 /* Check if cputimer isn't running. This is accessed without locking. */
237 if (!READ_ONCE(cputimer->running)) {
239 * The POSIX timer interface allows for absolute time expiry
240 * values through the TIMER_ABSTIME flag, therefore we have
241 * to synchronize the timer to the clock every time we start it.
243 thread_group_cputime(tsk, &sum);
244 update_gt_cputime(&cputimer->cputime_atomic, &sum);
247 * We're setting cputimer->running without a lock. Ensure
248 * this only gets written to in one operation. We set
249 * running after update_gt_cputime() as a small optimization,
250 * but barriers are not required because update_gt_cputime()
251 * can handle concurrent updates.
253 WRITE_ONCE(cputimer->running, true);
255 sample_cputime_atomic(times, &cputimer->cputime_atomic);
259 * Sample a process (thread group) clock for the given group_leader task.
260 * Must be called with task sighand lock held for safe while_each_thread()
261 * traversal.
263 static int cpu_clock_sample_group(const clockid_t which_clock,
264 struct task_struct *p,
265 unsigned long long *sample)
267 struct task_cputime cputime;
269 switch (CPUCLOCK_WHICH(which_clock)) {
273 thread_group_cputime(p, &cputime);
274 *sample = cputime_to_expires(cputime.utime + cputime.stime);
277 thread_group_cputime(p, &cputime);
278 *sample = cputime_to_expires(cputime.utime);
281 thread_group_cputime(p, &cputime);
282 *sample = cputime.sum_exec_runtime;
288 static int posix_cpu_clock_get_task(struct task_struct *tsk,
289 const clockid_t which_clock,
293 unsigned long long rtn;
295 if (CPUCLOCK_PERTHREAD(which_clock)) {
296 if (same_thread_group(tsk, current))
297 err = cpu_clock_sample(which_clock, tsk, &rtn);
299 if (tsk == current || thread_group_leader(tsk))
300 err = cpu_clock_sample_group(which_clock, tsk, &rtn);
304 sample_to_timespec(which_clock, rtn, tp);
310 static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
312 const pid_t pid = CPUCLOCK_PID(which_clock);
317 * Special case constant value for our own clocks.
318 * We don't have to do any lookup to find ourselves.
320 err = posix_cpu_clock_get_task(current, which_clock, tp);
323 * Find the given PID, and validate that the caller
324 * should be able to see it.
326 struct task_struct *p;
328 p = find_task_by_vpid(pid);
330 err = posix_cpu_clock_get_task(p, which_clock, tp);
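/*
 * Editor's note: hedged userspace example of the lookup above. glibc's
 * clock_getcpuclockid() produces an encoded CPU clockid for a pid, and
 * clock_gettime() on it lands in posix_cpu_clock_get(). The helper name
 * is hypothetical.
 */
#include <stdio.h>
#include <sys/types.h>
#include <time.h>

static int print_cpu_time(pid_t pid)	/* pid 0 means the caller */
{
	clockid_t cid;
	struct timespec ts;

	if (clock_getcpuclockid(pid, &cid) || clock_gettime(cid, &ts))
		return -1;
	printf("pid %d used %ld.%09ld s of CPU\n",
	       (int)pid, (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}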
339 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
340 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
341 * new timer already all-zeros initialized.
343 static int posix_cpu_timer_create(struct k_itimer *new_timer)
346 const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
347 struct task_struct *p;
349 if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
352 INIT_LIST_HEAD(&new_timer->it.cpu.entry);
355 if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
359 p = find_task_by_vpid(pid);
360 if (p && !same_thread_group(p, current))
365 p = current->group_leader;
367 p = find_task_by_vpid(pid);
368 if (p && !has_group_leader_pid(p))
372 new_timer->it.cpu.task = p;
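/*
 * Editor's note: userspace sketch of what reaches posix_cpu_timer_create():
 * a process CPU-time timer that raises SIGALRM after one second of CPU
 * time. Hypothetical helper; standard POSIX timer API assumed.
 */
#include <signal.h>
#include <time.h>

static int arm_cpu_timer(timer_t *out)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGALRM,
	};
	struct itimerspec its = {
		.it_value = { .tv_sec = 1 },	/* 1s of consumed CPU time */
	};

	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, out))
		return -1;
	return timer_settime(*out, 0, &its, NULL);
}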
384 * Clean up a CPU-clock timer that is about to be destroyed.
385 * This is called from timer deletion with the timer already locked.
386 * If we return TIMER_RETRY, it's necessary to release the timer's lock
387 * and try again. (This happens when the timer is in the middle of firing.)
389 static int posix_cpu_timer_del(struct k_itimer *timer)
393 struct sighand_struct *sighand;
394 struct task_struct *p = timer->it.cpu.task;
396 WARN_ON_ONCE(p == NULL);
399 * Protect against sighand release/switch in exit/exec and process/
400 * thread timer list entry concurrent read/writes.
402 sighand = lock_task_sighand(p, &flags);
403 if (unlikely(sighand == NULL)) {
405 * We raced with the reaping of the task.
406 * The deletion should have cleared us off the list.
408 WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
410 if (timer->it.cpu.firing)
413 list_del(&timer->it.cpu.entry);
415 unlock_task_sighand(p, &flags);
424 static void cleanup_timers_list(struct list_head *head)
426 struct cpu_timer_list *timer, *next;
428 list_for_each_entry_safe(timer, next, head, entry)
429 list_del_init(&timer->entry);
433 * Clean out CPU timers still ticking when a thread exited. The task
434 * pointer is cleared, and the expiry time is replaced with the residual
435 * time for later timer_gettime calls to return.
436 * This must be called with the siglock held.
438 static void cleanup_timers(struct list_head *head)
440 cleanup_timers_list(head);
441 cleanup_timers_list(++head);
442 cleanup_timers_list(++head);
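/*
 * Editor's note: head points at a 3-element array of list heads indexed
 * by CPUCLOCK_PROF/VIRT/SCHED (compare arm_timer() below, which does
 * head += CPUCLOCK_WHICH(...)), hence the two ++head calls above.
 */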
446 * These are both called with the siglock held, when the current thread
447 * is being reaped. When the final (leader) thread in the group is reaped,
448 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
450 void posix_cpu_timers_exit(struct task_struct *tsk)
452 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
453 sizeof(unsigned long long));
454 cleanup_timers(tsk->cpu_timers);
457 void posix_cpu_timers_exit_group(struct task_struct *tsk)
459 cleanup_timers(tsk->signal->cpu_timers);
462 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
464 return expires == 0 || expires > new_exp;
468 * Insert the timer on the appropriate list before any timers that
469 * expire later. This must be called with the sighand lock held.
471 static void arm_timer(struct k_itimer *timer)
473 struct task_struct *p = timer->it.cpu.task;
474 struct list_head *head, *listpos;
475 struct task_cputime *cputime_expires;
476 struct cpu_timer_list *const nt = &timer->it.cpu;
477 struct cpu_timer_list *next;
479 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
480 head = p->cpu_timers;
481 cputime_expires = &p->cputime_expires;
483 head = p->signal->cpu_timers;
484 cputime_expires = &p->signal->cputime_expires;
486 head += CPUCLOCK_WHICH(timer->it_clock);
489 list_for_each_entry(next, head, entry) {
490 if (nt->expires < next->expires)
492 listpos = &next->entry;
494 list_add(&nt->entry, listpos);
496 if (listpos == head) {
497 unsigned long long exp = nt->expires;
500 * We are the new earliest-expiring POSIX 1.b timer, hence
501 * need to update expiration cache. Take into account that
502 * for process timers we share expiration cache with itimers
503 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
506 switch (CPUCLOCK_WHICH(timer->it_clock)) {
508 if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
509 cputime_expires->prof_exp = expires_to_cputime(exp);
512 if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
513 cputime_expires->virt_exp = expires_to_cputime(exp);
516 if (cputime_expires->sched_exp == 0 ||
517 cputime_expires->sched_exp > exp)
518 cputime_expires->sched_exp = exp;
525 * The timer is locked, fire it and arrange for its reload.
527 static void cpu_timer_fire(struct k_itimer *timer)
529 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
531 * The user doesn't want any signal.
533 timer->it.cpu.expires = 0;
534 } else if (unlikely(timer->sigq == NULL)) {
536 * This is a special case for clock_nanosleep,
537 * not a normal timer from sys_timer_create.
539 wake_up_process(timer->it_process);
540 timer->it.cpu.expires = 0;
541 } else if (timer->it.cpu.incr == 0) {
543 * One-shot timer. Clear it as soon as it's fired.
545 posix_timer_event(timer, 0);
546 timer->it.cpu.expires = 0;
547 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
549 * The signal did not get queued because the signal
550 * was ignored, so we won't get any callback to
551 * reload the timer. But we need to keep it
552 * ticking in case the signal is deliverable next time.
554 posix_cpu_timer_schedule(timer);
559 * Sample a process (thread group) timer for the given group_leader task.
560 * Must be called with task sighand lock held for safe while_each_thread()
561 * traversal.
563 static int cpu_timer_sample_group(const clockid_t which_clock,
564 struct task_struct *p,
565 unsigned long long *sample)
567 struct task_cputime cputime;
569 thread_group_cputimer(p, &cputime);
570 switch (CPUCLOCK_WHICH(which_clock)) {
574 *sample = cputime_to_expires(cputime.utime + cputime.stime);
577 *sample = cputime_to_expires(cputime.utime);
580 *sample = cputime.sum_exec_runtime;
586 #ifdef CONFIG_NO_HZ_FULL
587 static void nohz_kick_work_fn(struct work_struct *work)
589 tick_nohz_full_kick_all();
592 static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
595 * We need the IPIs to be sent from sane process context.
596 * The posix cpu timers are always set with irqs disabled.
598 static void posix_cpu_timer_kick_nohz(void)
600 if (context_tracking_is_enabled())
601 schedule_work(&nohz_kick_work);
604 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
606 if (!task_cputime_zero(&tsk->cputime_expires))
609 /* Check if cputimer is running. This is accessed without locking. */
610 if (READ_ONCE(tsk->signal->cputimer.running))
616 static inline void posix_cpu_timer_kick_nohz(void) { }
620 * Guts of sys_timer_settime for CPU timers.
621 * This is called with the timer locked and interrupts disabled.
622 * If we return TIMER_RETRY, it's necessary to release the timer's lock
623 * and try again. (This happens when the timer is in the middle of firing.)
625 static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
626 struct itimerspec *new, struct itimerspec *old)
629 struct sighand_struct *sighand;
630 struct task_struct *p = timer->it.cpu.task;
631 unsigned long long old_expires, new_expires, old_incr, val;
634 WARN_ON_ONCE(p == NULL);
636 new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
639 * Protect against sighand release/switch in exit/exec and p->cpu_timers
640 * and p->signal->cpu_timers read/write in arm_timer()
642 sighand = lock_task_sighand(p, &flags);
644 * If p has just been reaped, we can no
645 * longer get any information about it at all.
647 if (unlikely(sighand == NULL)) {
652 * Disarm any old timer after extracting its expiry time.
654 WARN_ON_ONCE_NONRT(!irqs_disabled());
657 old_incr = timer->it.cpu.incr;
658 old_expires = timer->it.cpu.expires;
659 if (unlikely(timer->it.cpu.firing)) {
660 timer->it.cpu.firing = -1;
663 list_del_init(&timer->it.cpu.entry);
666 * We need to sample the current value to convert the new
667 * value from relative to absolute, and to convert the
668 * old value from absolute to relative. To set a process
669 * timer, we need a sample to balance the thread expiry
670 * times (in arm_timer). With an absolute time, we must
671 * check if it's already passed. In short, we need a sample.
673 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
674 cpu_clock_sample(timer->it_clock, p, &val);
676 cpu_timer_sample_group(timer->it_clock, p, &val);
680 if (old_expires == 0) {
681 old->it_value.tv_sec = 0;
682 old->it_value.tv_nsec = 0;
685 * Update the timer in case it has
686 * overrun already. If it has,
687 * we'll report it as having overrun
688 * and with the next reloaded timer
689 * already ticking, though we are
690 * swallowing that pending
691 * notification here to install the new setting.
694 bump_cpu_timer(timer, val);
695 if (val < timer->it.cpu.expires) {
696 old_expires = timer->it.cpu.expires - val;
697 sample_to_timespec(timer->it_clock,
701 old->it_value.tv_nsec = 1;
702 old->it_value.tv_sec = 0;
709 * We are colliding with the timer actually firing.
710 * Punt after filling in the timer's old value, and
711 * disable this firing since we are already reporting
712 * it as an overrun (thanks to bump_cpu_timer above).
714 unlock_task_sighand(p, &flags);
718 if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
723 * Install the new expiry time (or zero).
724 * For a timer with no notification action, we don't actually
725 * arm the timer (we'll just fake it for timer_gettime).
727 timer->it.cpu.expires = new_expires;
728 if (new_expires != 0 && val < new_expires) {
732 unlock_task_sighand(p, &flags);
734 * Install the new reload setting, and
735 * set up the signal and overrun bookkeeping.
737 timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
741 * This acts as a modification timestamp for the timer,
742 * so any automatic reload attempt will punt on seeing
743 * that we have reset the timer manually.
745 timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
747 timer->it_overrun_last = 0;
748 timer->it_overrun = -1;
750 if (new_expires != 0 && !(val < new_expires)) {
752 * The designated time already passed, so we notify
753 * immediately, even if the thread never runs to
754 * accumulate more time on this clock.
756 cpu_timer_fire(timer);
762 sample_to_timespec(timer->it_clock,
763 old_incr, &old->it_interval);
766 posix_cpu_timer_kick_nohz();
770 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
772 unsigned long long now;
773 struct task_struct *p = timer->it.cpu.task;
775 WARN_ON_ONCE(p == NULL);
778 * Easy part: convert the reload time.
780 sample_to_timespec(timer->it_clock,
781 timer->it.cpu.incr, &itp->it_interval);
783 if (timer->it.cpu.expires == 0) { /* Timer not armed at all. */
784 itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
789 * Sample the clock to take the difference with the expiry time.
791 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
792 cpu_clock_sample(timer->it_clock, p, &now);
794 struct sighand_struct *sighand;
798 * Protect against sighand release/switch in exit/exec and
799 * also make timer sampling safe if it ends up calling
800 * thread_group_cputime().
802 sighand = lock_task_sighand(p, &flags);
803 if (unlikely(sighand == NULL)) {
805 * The process has been reaped.
806 * We can't even collect a sample any more.
807 * Call the timer disarmed, nothing else to do.
809 timer->it.cpu.expires = 0;
810 sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
814 cpu_timer_sample_group(timer->it_clock, p, &now);
815 unlock_task_sighand(p, &flags);
819 if (now < timer->it.cpu.expires) {
820 sample_to_timespec(timer->it_clock,
821 timer->it.cpu.expires - now,
825 * The timer should have expired already, but the firing
826 * hasn't taken place yet. Say it's just about to expire.
828 itp->it_value.tv_nsec = 1;
829 itp->it_value.tv_sec = 0;
833 static unsigned long long
834 check_timers_list(struct list_head *timers,
835 struct list_head *firing,
836 unsigned long long curr)
840 while (!list_empty(timers)) {
841 struct cpu_timer_list *t;
843 t = list_first_entry(timers, struct cpu_timer_list, entry);
845 if (!--maxfire || curr < t->expires)
849 list_move_tail(&t->entry, firing);
856 * Check for any per-thread CPU timers that have fired and move them off
857 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
858 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
860 static void check_thread_timers(struct task_struct *tsk,
861 struct list_head *firing)
863 struct list_head *timers = tsk->cpu_timers;
864 struct signal_struct *const sig = tsk->signal;
865 struct task_cputime *tsk_expires = &tsk->cputime_expires;
866 unsigned long long expires;
870 * If cputime_expires is zero, then there are no active
871 * per-thread CPU timers.
873 if (task_cputime_zero(&tsk->cputime_expires))
876 expires = check_timers_list(timers, firing, prof_ticks(tsk));
877 tsk_expires->prof_exp = expires_to_cputime(expires);
879 expires = check_timers_list(++timers, firing, virt_ticks(tsk));
880 tsk_expires->virt_exp = expires_to_cputime(expires);
882 tsk_expires->sched_exp = check_timers_list(++timers, firing,
883 tsk->se.sum_exec_runtime);
886 * Check for the special case thread timers.
888 soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
889 if (soft != RLIM_INFINITY) {
891 READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
893 if (hard != RLIM_INFINITY &&
894 tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
896 * At the hard limit, we just die.
897 * No need to calculate anything else now.
899 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
902 if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
904 * At the soft limit, send a SIGXCPU every second.
907 soft += USEC_PER_SEC;
908 sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
911 "RT Watchdog Timeout: %s[%d]\n",
912 tsk->comm, task_pid_nr(tsk));
913 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
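/*
 * Editor's note: rt.timeout counts scheduler ticks while RLIMIT_RTTIME is
 * in microseconds, hence the DIV_ROUND_UP(limit, USEC_PER_SEC/HZ)
 * conversions above. E.g. at HZ=250 a tick is 4000us, so a 500000us soft
 * limit trips once rt.timeout exceeds DIV_ROUND_UP(500000, 4000) = 125.
 */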
918 static inline void stop_process_timers(struct signal_struct *sig)
920 struct thread_group_cputimer *cputimer = &sig->cputimer;
922 /* Turn off cputimer->running. This is done without locking. */
923 WRITE_ONCE(cputimer->running, false);
926 static u32 onecputick;
928 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
929 unsigned long long *expires,
930 unsigned long long cur_time, int signo)
935 if (cur_time >= it->expires) {
937 it->expires += it->incr;
938 it->error += it->incr_error;
939 if (it->error >= onecputick) {
940 it->expires -= cputime_one_jiffy;
941 it->error -= onecputick;
947 trace_itimer_expire(signo == SIGPROF ?
948 ITIMER_PROF : ITIMER_VIRTUAL,
949 tsk->signal->leader_pid, cur_time);
950 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
953 if (it->expires && (!*expires || it->expires < *expires)) {
954 *expires = it->expires;
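/*
 * Editor's note: hedged userspace sketch of the itimer that feeds
 * check_cpu_itimer(): ITIMER_PROF counts process user+system CPU time
 * and raises SIGPROF on expiry. Hypothetical helper name.
 */
#include <sys/time.h>

static int arm_prof_itimer(void)
{
	struct itimerval itv = {
		.it_interval = { .tv_usec = 10000 },	/* re-arm every 10ms */
		.it_value    = { .tv_usec = 10000 },	/* first SIGPROF at 10ms */
	};

	return setitimer(ITIMER_PROF, &itv, NULL);
}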
959 * Check for any per-process CPU timers that have fired and move them
960 * off the tsk->signal->cpu_timers list onto the firing list. Per-thread timers
961 * have already been taken off.
963 static void check_process_timers(struct task_struct *tsk,
964 struct list_head *firing)
966 struct signal_struct *const sig = tsk->signal;
967 unsigned long long utime, ptime, virt_expires, prof_expires;
968 unsigned long long sum_sched_runtime, sched_expires;
969 struct list_head *timers = sig->cpu_timers;
970 struct task_cputime cputime;
974 * If cputimer is not running, then there are no active
975 * process-wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
977 if (!READ_ONCE(tsk->signal->cputimer.running))
981 * Signify that a thread is checking for process timers.
982 * Write access to this field is protected by the sighand lock.
984 sig->cputimer.checking_timer = true;
987 * Collect the current process totals.
989 thread_group_cputimer(tsk, &cputime);
990 utime = cputime_to_expires(cputime.utime);
991 ptime = utime + cputime_to_expires(cputime.stime);
992 sum_sched_runtime = cputime.sum_exec_runtime;
994 prof_expires = check_timers_list(timers, firing, ptime);
995 virt_expires = check_timers_list(++timers, firing, utime);
996 sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);
999 * Check for the special case process timers.
1001 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
1003 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
1005 soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1006 if (soft != RLIM_INFINITY) {
1007 unsigned long psecs = cputime_to_secs(ptime);
1008 unsigned long hard =
1009 READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
1011 if (psecs >= hard) {
1013 * At the hard limit, we just die.
1014 * No need to calculate anything else now.
1016 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
1019 if (psecs >= soft) {
1021 * At the soft limit, send a SIGXCPU every second.
1023 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
1026 sig->rlim[RLIMIT_CPU].rlim_cur = soft;
1029 x = secs_to_cputime(soft);
1030 if (!prof_expires || x < prof_expires) {
1035 sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
1036 sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
1037 sig->cputime_expires.sched_exp = sched_expires;
1038 if (task_cputime_zero(&sig->cputime_expires))
1039 stop_process_timers(sig);
1041 sig->cputimer.checking_timer = false;
1045 * This is called from the signal code (via do_schedule_next_timer)
1046 * when the last timer signal was delivered and we have to reload the timer.
1048 void posix_cpu_timer_schedule(struct k_itimer *timer)
1050 struct sighand_struct *sighand;
1051 unsigned long flags;
1052 struct task_struct *p = timer->it.cpu.task;
1053 unsigned long long now;
1055 WARN_ON_ONCE(p == NULL);
1058 * Fetch the current sample and update the timer's expiry time.
1060 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
1061 cpu_clock_sample(timer->it_clock, p, &now);
1062 bump_cpu_timer(timer, now);
1063 if (unlikely(p->exit_state))
1066 /* Protect timer list r/w in arm_timer() */
1067 sighand = lock_task_sighand(p, &flags);
1072 * Protect arm_timer() and timer sampling in case of call to
1073 * thread_group_cputime().
1075 sighand = lock_task_sighand(p, &flags);
1076 if (unlikely(sighand == NULL)) {
1078 * The process has been reaped.
1079 * We can't even collect a sample any more.
1081 timer->it.cpu.expires = 0;
1083 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1084 unlock_task_sighand(p, &flags);
1085 /* Optimization: if the process is dying, no need to rearm */
1088 cpu_timer_sample_group(timer->it_clock, p, &now);
1089 bump_cpu_timer(timer, now);
1090 /* Leave the sighand locked for the call below. */
1094 * Now re-arm for the new expiry time.
1096 WARN_ON_ONCE_NONRT(!irqs_disabled());
1098 unlock_task_sighand(p, &flags);
1100 /* Kick full dynticks CPUs in case they need to tick on the new timer */
1101 posix_cpu_timer_kick_nohz();
1103 timer->it_overrun_last = timer->it_overrun;
1104 timer->it_overrun = -1;
1105 ++timer->it_requeue_pending;
1109 * task_cputime_expired - Compare two task_cputime entities.
1111 * @sample: The task_cputime structure to be checked for expiration.
1112 * @expires: Expiration times, against which @sample will be checked.
1114 * Checks @sample against @expires to see if any field of @sample has expired.
1115 * Returns true if any field of @sample has reached the corresponding
1116 * nonzero field of @expires; otherwise returns false.
1118 static inline int task_cputime_expired(const struct task_cputime *sample,
1119 const struct task_cputime *expires)
1121 if (expires->utime && sample->utime >= expires->utime)
1123 if (expires->stime && sample->utime + sample->stime >= expires->stime)
1125 if (expires->sum_exec_runtime != 0 &&
1126 sample->sum_exec_runtime >= expires->sum_exec_runtime)
1132 * fastpath_timer_check - POSIX CPU timers fast path.
1134 * @tsk: The task (thread) being checked.
1136 * Check the task and thread group timers. If both are zero (there are no
1137 * timers set) return false. Otherwise snapshot the task and thread group
1138 * timers and compare them with the corresponding expiration times. Return
1139 * true if a timer has expired, else return false.
1141 static inline int fastpath_timer_check(struct task_struct *tsk)
1143 struct signal_struct *sig;
1145 if (!task_cputime_zero(&tsk->cputime_expires)) {
1146 struct task_cputime task_sample;
1148 task_cputime(tsk, &task_sample.utime, &task_sample.stime);
1149 task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
1150 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1156 * Check if thread group timers expired when the cputimer is
1157 * running and no other thread in the group is already checking
1158 * for thread group cputimers. These fields are read without the
1159 * sighand lock. However, this is fine because this is meant to
1160 * be a fastpath heuristic to determine whether we should try to
1161 * acquire the sighand lock to check/handle timers.
1163 * In the worst case scenario, if 'running' or 'checking_timer' gets
1164 * set but the current thread doesn't see the change yet, we'll wait
1165 * until the next thread in the group gets a scheduler interrupt to
1166 * handle the timer. This isn't an issue in practice because these
1167 * types of delays with signals actually getting sent are expected.
1169 if (READ_ONCE(sig->cputimer.running) &&
1170 !READ_ONCE(sig->cputimer.checking_timer)) {
1171 struct task_cputime group_sample;
1173 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
1175 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1183 * This is called from the timer interrupt handler. The irq handler has
1184 * already updated our counts. We need to check if any timers fire now.
1185 * Interrupts are disabled.
1187 static void __run_posix_cpu_timers(struct task_struct *tsk)
1190 struct k_itimer *timer, *next;
1191 unsigned long flags;
1193 WARN_ON_ONCE_NONRT(!irqs_disabled());
1196 * The fast path checks that there are no expired thread or thread
1197 * group timers. If that's so, just return.
1199 if (!fastpath_timer_check(tsk))
1202 if (!lock_task_sighand(tsk, &flags))
1205 * Here we take all the firing timers off the
1206 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists
1207 * and put them on the firing list.
1209 check_thread_timers(tsk, &firing);
1211 check_process_timers(tsk, &firing);
1214 * We must release these locks before taking any timer's lock.
1215 * There is a potential race with timer deletion here, as the
1216 * siglock now protects our private firing list. We have set
1217 * the firing flag in each timer, so that a deletion attempt
1218 * that gets the timer lock before we do will give it up and
1219 * spin until we've taken care of that timer below.
1221 unlock_task_sighand(tsk, &flags);
1224 * Now that all the timers on our list have the firing flag,
1225 * no one will touch their list entries but us. We'll take
1226 * each timer's lock before clearing its firing flag, so no
1227 * timer call will interfere.
1229 list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
1232 spin_lock(&timer->it_lock);
1233 list_del_init(&timer->it.cpu.entry);
1234 cpu_firing = timer->it.cpu.firing;
1235 timer->it.cpu.firing = 0;
1237 * The firing flag is -1 if we collided with a reset
1238 * of the timer, which already reported this
1239 * almost-firing as an overrun. So don't generate an event.
1241 if (likely(cpu_firing >= 0))
1242 cpu_timer_fire(timer);
1243 spin_unlock(&timer->it_lock);
1247 #ifdef CONFIG_PREEMPT_RT_BASE
1248 #include <linux/kthread.h>
1249 #include <linux/cpu.h>
1250 DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
1251 DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
1253 static int posix_cpu_timers_thread(void *data)
1255 int cpu = (long)data;
1257 BUG_ON(per_cpu(posix_timer_task, cpu) != current);
1259 while (!kthread_should_stop()) {
1260 struct task_struct *tsk = NULL;
1261 struct task_struct *next = NULL;
1263 if (cpu_is_offline(cpu))
1266 /* grab task list */
1267 raw_local_irq_disable();
1268 tsk = per_cpu(posix_timer_tasklist, cpu);
1269 per_cpu(posix_timer_tasklist, cpu) = NULL;
1270 raw_local_irq_enable();
1272 /* it's possible the list is empty, just return */
1274 set_current_state(TASK_INTERRUPTIBLE);
1276 __set_current_state(TASK_RUNNING);
1280 /* Process task list */
1283 next = tsk->posix_timer_list;
1285 /* run the task timers, clear its list ptr and drop our reference */
1288 __run_posix_cpu_timers(tsk);
1289 tsk->posix_timer_list = NULL;
1290 put_task_struct(tsk);
1292 /* check if this is the last on the list */
1301 /* Wait for kthread_stop */
1302 set_current_state(TASK_INTERRUPTIBLE);
1303 while (!kthread_should_stop()) {
1305 set_current_state(TASK_INTERRUPTIBLE);
1307 __set_current_state(TASK_RUNNING);
1311 static inline int __fastpath_timer_check(struct task_struct *tsk)
1313 /* tsk == current, ensure it is safe to use ->signal/sighand */
1314 if (unlikely(tsk->exit_state))
1317 if (!task_cputime_zero(&tsk->cputime_expires))
1320 if (!task_cputime_zero(&tsk->signal->cputime_expires))
1326 void run_posix_cpu_timers(struct task_struct *tsk)
1328 unsigned long cpu = smp_processor_id();
1329 struct task_struct *tasklist;
1331 BUG_ON(!irqs_disabled());
1332 if (!per_cpu(posix_timer_task, cpu))
1334 /* get per-cpu references */
1335 tasklist = per_cpu(posix_timer_tasklist, cpu);
1337 /* check to see if we're already queued */
1338 if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
1339 get_task_struct(tsk);
1341 tsk->posix_timer_list = tasklist;
1344 * The list is terminated by a self-pointing entry
1345 * (tsk->posix_timer_list == tsk).
1347 tsk->posix_timer_list = tsk;
1349 per_cpu(posix_timer_tasklist, cpu) = tsk;
1351 wake_up_process(per_cpu(posix_timer_task, cpu));
1356 * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
1357 * Here we can start up the necessary timer thread for the new CPU.
1359 static int posix_cpu_thread_call(struct notifier_block *nfb,
1360 unsigned long action, void *hcpu)
1362 int cpu = (long)hcpu;
1363 struct task_struct *p;
1364 struct sched_param param;
1367 case CPU_UP_PREPARE:
1368 p = kthread_create(posix_cpu_timers_thread, hcpu,
1369 "posixcputmr/%d",cpu);
1372 p->flags |= PF_NOFREEZE;
1373 kthread_bind(p, cpu);
1374 /* Must be high prio to avoid getting starved */
1375 param.sched_priority = MAX_RT_PRIO-1;
1376 sched_setscheduler(p, SCHED_FIFO, &param);
1377 per_cpu(posix_timer_task, cpu) = p;
1380 /* Strictly unnecessary, as the first user will wake it. */
1381 wake_up_process(per_cpu(posix_timer_task, cpu));
1383 #ifdef CONFIG_HOTPLUG_CPU
1384 case CPU_UP_CANCELED:
1385 /* Unbind it from the offline cpu so it can run. Fall through. */
1386 kthread_bind(per_cpu(posix_timer_task, cpu),
1387 cpumask_any(cpu_online_mask));
1388 kthread_stop(per_cpu(posix_timer_task, cpu));
1389 per_cpu(posix_timer_task, cpu) = NULL;
1392 kthread_stop(per_cpu(posix_timer_task, cpu));
1393 per_cpu(posix_timer_task, cpu) = NULL;
1400 /* Register at highest priority so that task migration (migrate_all_tasks)
1401 * happens before everything else.
1403 static struct notifier_block posix_cpu_thread_notifier = {
1404 .notifier_call = posix_cpu_thread_call,
1408 static int __init posix_cpu_thread_init(void)
1410 void *hcpu = (void *)(long)smp_processor_id();
1411 /* Start one for boot CPU. */
1415 /* init the per-cpu posix_timer_tasklist pointers */
1415 for_each_possible_cpu(cpu)
1416 per_cpu(posix_timer_tasklist, cpu) = NULL;
1418 posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
1419 posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
1420 register_cpu_notifier(&posix_cpu_thread_notifier);
1423 early_initcall(posix_cpu_thread_init);
1424 #else /* CONFIG_PREEMPT_RT_BASE */
1425 void run_posix_cpu_timers(struct task_struct *tsk)
1427 __run_posix_cpu_timers(tsk);
1429 #endif /* CONFIG_PREEMPT_RT_BASE */
1432 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1433 * The tsk->sighand->siglock must be held by the caller.
1435 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1436 cputime_t *newval, cputime_t *oldval)
1438 unsigned long long now;
1440 WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
1441 cpu_timer_sample_group(clock_idx, tsk, &now);
1445 * We are setting the itimer. The *oldval is absolute and we update
1446 * it to be relative; the *newval argument is relative and we update
1447 * it to be absolute.
1450 if (*oldval <= now) {
1451 /* Just about to fire. */
1452 *oldval = cputime_one_jiffy;
1464 * Update the expiration cache if we are the earliest timer, or if the
1465 * RLIMIT_CPU limit expires earlier than the prof_exp cpu timer.
1467 switch (clock_idx) {
1469 if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
1470 tsk->signal->cputime_expires.prof_exp = *newval;
1473 if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
1474 tsk->signal->cputime_expires.virt_exp = *newval;
1478 posix_cpu_timer_kick_nohz();
1481 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1482 struct timespec *rqtp, struct itimerspec *it)
1484 struct k_itimer timer;
1488 * Set up a temporary timer and then wait for it to go off.
1490 memset(&timer, 0, sizeof timer);
1491 spin_lock_init(&timer.it_lock);
1492 timer.it_clock = which_clock;
1493 timer.it_overrun = -1;
1494 error = posix_cpu_timer_create(&timer);
1495 timer.it_process = current;
1497 static struct itimerspec zero_it;
1499 memset(it, 0, sizeof *it);
1500 it->it_value = *rqtp;
1502 spin_lock_irq(&timer.it_lock);
1503 error = posix_cpu_timer_set(&timer, flags, it, NULL);
1505 spin_unlock_irq(&timer.it_lock);
1509 while (!signal_pending(current)) {
1510 if (timer.it.cpu.expires == 0) {
1512 * Our timer fired and was reset; the
1513 * deletion below cannot fail.
1515 posix_cpu_timer_del(&timer);
1516 spin_unlock_irq(&timer.it_lock);
1521 * Block until cpu_timer_fire (or a signal) wakes us.
1523 __set_current_state(TASK_INTERRUPTIBLE);
1524 spin_unlock_irq(&timer.it_lock);
1526 spin_lock_irq(&timer.it_lock);
1530 * We were interrupted by a signal.
1532 sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
1533 error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
1536 * The timer is now unarmed; deletion cannot fail.
1538 posix_cpu_timer_del(&timer);
1540 spin_unlock_irq(&timer.it_lock);
1542 while (error == TIMER_RETRY) {
1544 * We need to handle the case when the timer was or is in the
1545 * middle of firing. In other cases we have already freed resources.
1548 spin_lock_irq(&timer.it_lock);
1549 error = posix_cpu_timer_del(&timer);
1550 spin_unlock_irq(&timer.it_lock);
1553 if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
1555 * It actually did fire already.
1560 error = -ERESTART_RESTARTBLOCK;
1566 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1568 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1569 struct timespec *rqtp, struct timespec __user *rmtp)
1571 struct restart_block *restart_block = &current->restart_block;
1572 struct itimerspec it;
1576 * Diagnose required errors first.
1578 if (CPUCLOCK_PERTHREAD(which_clock) &&
1579 (CPUCLOCK_PID(which_clock) == 0 ||
1580 CPUCLOCK_PID(which_clock) == current->pid))
1583 error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
1585 if (error == -ERESTART_RESTARTBLOCK) {
1587 if (flags & TIMER_ABSTIME)
1588 return -ERESTARTNOHAND;
1590 * Report back to the user the time still remaining.
1592 if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1595 restart_block->fn = posix_cpu_nsleep_restart;
1596 restart_block->nanosleep.clockid = which_clock;
1597 restart_block->nanosleep.rmtp = rmtp;
1598 restart_block->nanosleep.expires = timespec_to_ns(rqtp);
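/*
 * Editor's note: userspace entry point for the sleep path above (hedged
 * sketch, hypothetical helper): clock_nanosleep() on a CPU clock blocks
 * until the process has consumed the requested amount of CPU time.
 */
#include <time.h>

static int sleep_one_cpu_second(void)
{
	struct timespec req = { .tv_sec = 1 };

	/* flags 0 = relative; TIMER_ABSTIME would give an absolute expiry */
	return clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
}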
1603 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1605 clockid_t which_clock = restart_block->nanosleep.clockid;
1607 struct itimerspec it;
1610 t = ns_to_timespec(restart_block->nanosleep.expires);
1612 error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
1614 if (error == -ERESTART_RESTARTBLOCK) {
1615 struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
1617 * Report back to the user the time still remaining.
1619 if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1622 restart_block->nanosleep.expires = timespec_to_ns(&t);
1628 #define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
1629 #define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
1631 static int process_cpu_clock_getres(const clockid_t which_clock,
1632 struct timespec *tp)
1634 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1636 static int process_cpu_clock_get(const clockid_t which_clock,
1637 struct timespec *tp)
1639 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1641 static int process_cpu_timer_create(struct k_itimer *timer)
1643 timer->it_clock = PROCESS_CLOCK;
1644 return posix_cpu_timer_create(timer);
1646 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1647 struct timespec *rqtp,
1648 struct timespec __user *rmtp)
1650 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
1652 static long process_cpu_nsleep_restart(struct restart_block *restart_block)
1656 static int thread_cpu_clock_getres(const clockid_t which_clock,
1657 struct timespec *tp)
1659 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1661 static int thread_cpu_clock_get(const clockid_t which_clock,
1662 struct timespec *tp)
1664 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1666 static int thread_cpu_timer_create(struct k_itimer *timer)
1668 timer->it_clock = THREAD_CLOCK;
1669 return posix_cpu_timer_create(timer);
1672 struct k_clock clock_posix_cpu = {
1673 .clock_getres = posix_cpu_clock_getres,
1674 .clock_set = posix_cpu_clock_set,
1675 .clock_get = posix_cpu_clock_get,
1676 .timer_create = posix_cpu_timer_create,
1677 .nsleep = posix_cpu_nsleep,
1678 .nsleep_restart = posix_cpu_nsleep_restart,
1679 .timer_set = posix_cpu_timer_set,
1680 .timer_del = posix_cpu_timer_del,
1681 .timer_get = posix_cpu_timer_get,
1684 static __init int init_posix_cpu_timers(void)
1686 struct k_clock process = {
1687 .clock_getres = process_cpu_clock_getres,
1688 .clock_get = process_cpu_clock_get,
1689 .timer_create = process_cpu_timer_create,
1690 .nsleep = process_cpu_nsleep,
1691 .nsleep_restart = process_cpu_nsleep_restart,
1693 struct k_clock thread = {
1694 .clock_getres = thread_cpu_clock_getres,
1695 .clock_get = thread_cpu_clock_get,
1696 .timer_create = thread_cpu_timer_create,
1700 posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1701 posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1703 cputime_to_timespec(cputime_one_jiffy, &ts);
1704 onecputick = ts.tv_nsec;
1705 WARN_ON(ts.tv_sec != 0);
1709 __initcall(init_posix_cpu_timers);