 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR

#include <linux/slab.h>
#include <linux/irq_work.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)

	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);

		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		idle = do_sched_rt_period_timer(rt_b, overrun);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)

	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.irqsafe = 1;
	rt_b->rt_period_timer.function = sched_rt_period_timer;
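/*
 * Illustrative sketch (not part of this file): how the global defaults
 * reach init_rt_bandwidth().  sched_init() does something along these
 * lines, assuming the usual sysctl defaults of
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000:
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(),	// 1000000us in ns = 1s
 *			  global_rt_runtime());	// 950000us in ns = 0.95s
 */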
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)

	if (hrtimer_active(&rt_b->rt_period_timer))

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);

static void push_irq_work_func(struct irq_work *work);

void init_rt_rq(struct rt_rq *rt_rq)

	struct rt_prio_array *array;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);

	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);

#ifdef HAVE_RT_PUSH_IPI
	rt_rq->push_flags = 0;
	rt_rq->push_cpu = nr_cpu_ids;
	raw_spin_lock_init(&rt_rq->push_lock);
	init_irq_work(&rt_rq->push_work, push_irq_work_func);
	rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ;

#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */

	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)

	hrtimer_cancel(&rt_b->rt_period_timer);

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)

#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));

	return container_of(rt_se, struct task_struct, rt);

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)

	struct rt_rq *rt_rq = rt_se->rt_rq;

void free_rt_sched_group(struct task_group *tg)

		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)

	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

		rt_se->rt_rq = &rq->rt;

		rt_se->rt_rq = parent->my_q;

	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)

	struct sched_rt_entity *rt_se;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);

	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));

		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)

	return container_of(rt_se, struct task_struct, rt);

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)

	return container_of(rt_rq, struct rq, rt);

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)

	struct task_struct *p = rt_task_of(rt_se);

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)

	struct rq *rq = rq_of_rt_se(rt_se);

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)

#endif /* CONFIG_RT_GROUP_SCHED */

static int pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)

	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;

static inline int rt_overloaded(struct rq *rq)

	return atomic_read(&rq->rd->rto_count);

static inline void rt_set_overload(struct rq *rq)

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);

	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 *
	 * Matched by the barrier in pull_rt_task().

	atomic_inc(&rq->rd->rto_count);
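/*
 * Illustrative pairing sketch (assumption-level): the ordering the
 * comment above relies on is
 *
 *	CPU0 (rt_set_overload)		CPU1 (pull_rt_task)
 *	cpumask_set_cpu(rto_mask)	atomic_read(rto_count)
 *	smp_wmb()			smp_rmb()
 *	atomic_inc(rto_count)		read rto_mask
 *
 * so a puller that observes rto_count != 0 is guaranteed to also see
 * the corresponding bit set in rto_mask.
 */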
static inline void rt_clear_overload(struct rq *rq)

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);

static void update_rt_migration(struct rt_rq *rt_rq)

	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;

	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);

static inline int has_pushable_tasks(struct rq *rq)

	return !plist_head_empty(&rq->rt.pushable_tasks);

static inline void set_post_schedule(struct rq *rq)

	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push

	rq->post_schedule = has_pushable_tasks(rq);

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)

	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)

	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;

		rq->rt.highest_prio.next = MAX_RT_PRIO;
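/*
 * Illustrative note: pushable_tasks is a plist keyed on ->prio, so its
 * first entry is always the highest-priority (numerically lowest prio)
 * pushable task.  A minimal usage sketch for some queued task @p:
 *
 *	plist_node_init(&p->pushable_tasks, p->prio);
 *	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 *	p = plist_first_entry(&rq->rt.pushable_tasks,
 *			      struct task_struct, pushable_tasks);
 */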
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)

void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)

static inline int pull_rt_task(struct rq *this_rq)

static inline void set_post_schedule(struct rq *rq)

#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)

	return !list_empty(&rt_se->run_list);

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)

	return rt_rq->rt_runtime;

static inline u64 sched_rt_period(struct rt_rq *rt_rq)

	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)

		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)
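/*
 * Illustrative sketch: with CONFIG_RT_GROUP_SCHED, this walks from a
 * task's entity up through its task groups to the root.  For
 * task -> group B -> group A -> root, the loop visits three entities:
 *
 *	struct sched_rt_entity *rt_se = &p->rt;
 *	for_each_sched_rt_entity(rt_se)
 *		__enqueue_rt_entity(rt_se, false);	// task, B, A
 */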
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)

	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {

			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);

		if (rt_rq->highest_prio.curr < curr->prio)

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)

	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

		dequeue_top_rt_rq(rt_rq);
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);

static inline int rt_rq_throttled(struct rt_rq *rt_rq)

	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;

static int rt_se_boosted(struct sched_rt_entity *rt_se)

	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;

static inline const struct cpumask *sched_rt_period_mask(void)

	return this_rq()->rd->span;

static inline const struct cpumask *sched_rt_period_mask(void)

	return cpu_online_mask;

struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)

	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)

	return &rt_rq->tg->rt_bandwidth;

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)

	return rt_rq->rt_runtime;

static inline u64 sched_rt_period(struct rt_rq *rt_rq)

	return ktime_to_ns(def_rt_bandwidth.rt_period);

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)

	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)

	enqueue_top_rt_rq(rt_rq);

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)

	dequeue_top_rt_rq(rt_rq);

static inline int rt_rq_throttled(struct rt_rq *rt_rq)

	return rt_rq->rt_throttled;

static inline const struct cpumask *sched_rt_period_mask(void)

	return cpu_online_mask;

struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)

	return &cpu_rq(cpu)->rt;

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)

	return &def_rt_bandwidth;

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)

	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);

 * We ran out of runtime; see if we can borrow some from our neighbours.

static int do_balance_runtime(struct rt_rq *rt_rq)

	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight, more = 0;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);

		raw_spin_lock(&iter->rt_runtime_lock);

		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.

		if (iter->rt_runtime == RUNTIME_INF)

		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.

		diff = iter->rt_runtime - iter->rt_time;

			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;

			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);

		raw_spin_unlock(&iter->rt_runtime_lock);

	raw_spin_unlock(&rt_b->rt_runtime_lock);
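/*
 * Worked example (illustrative numbers): with rt_period = 1s and a
 * 4-CPU root domain (weight = 4), a neighbour that holds 0.95s of
 * runtime and has consumed 0.80s of it has 0.15s spare, so it offers
 * diff = 0.15s / 4 = 37.5ms, clamped above so that the borrower's
 * rt_runtime never exceeds rt_period.
 */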
 * Ensure this RQ takes back all the runtime it lent to its neighbours.

static void __disable_runtime(struct rq *rq)

	struct root_domain *rd = rq->rd;

	if (unlikely(!scheduler_running))

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);

		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.

		if (rt_rq->rt_runtime == RUNTIME_INF ||
		    rt_rq->rt_runtime == rt_b->rt_runtime)

		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.

		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		 * Greedy reclaim, take back as much as we can.

		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);

			 * Can't reclaim from ourselves or disabled runqueues.

			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)

			raw_spin_lock(&iter->rt_runtime_lock);

				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;

				iter->rt_runtime -= want;

			raw_spin_unlock(&iter->rt_runtime_lock);

		raw_spin_lock(&rt_rq->rt_runtime_lock);

		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.

		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.

		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);

static void __enable_runtime(struct rq *rq)

	if (unlikely(!scheduler_running))

	 * Reset each runqueue's bandwidth settings

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;

		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

static int balance_runtime(struct rt_rq *rt_rq)

	if (!sched_feat(RT_RUNTIME_SHARE))

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);

#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)

#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)

	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED

	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.

	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;

	for_each_cpu(i, span) {

		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;

				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as

				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_skip_update(rq, false);

			if (rt_rq->rt_time || rt_rq->rt_nr_running)

			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {

			if (!rt_rq_throttled(rt_rq))

		if (rt_rq->rt_throttled)

			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
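/*
 * Illustrative timeline: with runtime = 950ms and period = 1s, a CPU
 * hog is throttled once its rt_time reaches 950ms; when this handler
 * runs at the end of the period it subtracts overrun * runtime from
 * rt_time, clears rt_throttled and re-enqueues the rt_rq, giving the
 * task another 950ms window.
 */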
static inline int rt_se_prio(struct sched_rt_entity *rt_se)

#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

		return rt_rq->highest_prio.curr;

	return rt_task_of(rt_se)->prio;

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)

	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.

		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");

			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
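/*
 * Worked example (illustrative): with the default
 * /proc/sys/kernel/sched_rt_runtime_us = 950000 and
 * sched_rt_period_us = 1000000, an rt_rq that accumulates more than
 * 950ms of rt_time within one 1s period is marked rt_throttled and
 * dequeued here, leaving the remaining ~50ms to non-RT tasks until
 * the period timer replenishes it.
 */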
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.

static void update_curr_rt(struct rq *rq)

	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;

	if (curr->sched_class != &rt_sched_class)

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))

			raw_spin_unlock(&rt_rq->rt_runtime_lock);

dequeue_top_rt_rq(struct rt_rq *rt_rq)

	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;

enqueue_top_rt_rq(struct rt_rq *rt_rq)

	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)

	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)

	add_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 1;

#if defined CONFIG_SMP

inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)

	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED

	 * Change rq's cpupri only if rt_rq is the top queue.

	if (&rq->rt != rt_rq)

	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)

	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED

	 * Change rq's cpupri only if rt_rq is the top queue.

	if (&rq->rt != rt_rq)

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);

#else /* CONFIG_SMP */

void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED

inc_rt_prio(struct rt_rq *rt_rq, int prio)

	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);

dec_rt_prio(struct rt_rq *rt_rq, int prio)

	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do

		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);

		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED

inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);

dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);

#else /* CONFIG_RT_GROUP_SCHED */

inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

	start_rt_bandwidth(&def_rt_bandwidth);

void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)

	struct rt_rq *group_rq = group_rt_rq(rt_se);

		return group_rq->rt_nr_running;

void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);

void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)

	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other

	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))

		list_add(&rt_se->run_list, queue);

		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)

	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);

 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.

static void dequeue_rt_stack(struct sched_rt_entity *rt_se)

	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {

	dequeue_top_rt_rq(rt_rq_of_se(back));

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)

	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
	enqueue_top_rt_rq(&rq->rt);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se)

	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);

	enqueue_top_rt_rq(&rq->rt);

 * Adding/removing a task to/from a priority array:

enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)

	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)

	struct sched_rt_entity *rt_se = &p->rt;

	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

 * Put a task at the head or the tail of the run list without the overhead of
 * dequeue followed by enqueue.

requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)

	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

			list_move(&rt_se->run_list, queue);

			list_move_tail(&rt_se->run_list, queue);

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)

	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);

static void yield_task_rt(struct rq *rq)

	requeue_task_rt(rq, rq->curr, 0);

static int find_lowest_rq(struct task_struct *task);

select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)

	struct task_struct *curr;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)

	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic; if we get it wrong the load-balancer
	 * will have to sort it out.

	if (curr && unlikely(rt_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     curr->prio <= p->prio)) {
		int target = find_lowest_rq(p);

		 * Don't bother moving it if the destination CPU is
		 * not running a lower priority task.

		    p->prio < cpu_rq(target)->rt.highest_prio.curr)

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)

	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.

	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))

	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.

	if (p->nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))

	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:

	requeue_task_rt(rq, p, 1);

#endif /* CONFIG_SMP */

 * Preempt the current task with a newly woken task if needed:

static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)

	if (p->prio < rq->curr->prio) {

	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable

	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)

	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);
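/*
 * Illustrative note on the O(1) pick above: __enqueue_rt_entity() and
 * __dequeue_rt_entity() maintain the invariant that a bit in
 * array->bitmap is set iff the corresponding list is non-empty (with
 * the delimiter bit at MAX_RT_PRIO set by init_rt_rq()), so selecting
 * the next entity is just two constant-time steps:
 *
 *	idx  = sched_find_first_bit(array->bitmap);	// highest prio
 *	next = list_entry(array->queue[idx].next,
 *			  struct sched_rt_entity, run_list);
 */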
static struct task_struct *_pick_next_task_rt(struct rq *rq)

	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

		rt_se = pick_next_rt_entity(rq, rt_rq);

		rt_rq = group_rt_rq(rt_se);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq_clock_task(rq);

static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev)

	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

	if (need_pull_rt_task(rq, prev)) {

		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
		 * means a dl or stop task can slip in, in which case we need
		 * to re-start task selection.

		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
			     rq->dl.dl_nr_running))

	 * We may dequeue prev's rt_rq in put_prev_task().
	 * So, we update time before the rt_nr_running check.

	if (prev->sched_class == &rt_sched_class)

	if (!rt_rq->rt_queued)

	put_prev_task(rq, prev);

	p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

	set_post_schedule(rq);

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)

	 * The previous task needs to be made eligible for pushing
	 * if it is still active

	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)

	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))

 * Return the highest-prio pushable task of @rq which can run
 * on @cpu, or NULL otherwise.

static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)

	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))

	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)

	struct sched_domain *sd;
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.

	if (cpumask_test_cpu(cpu, lowest_mask))

	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.

	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			 * "this_cpu" is cheaper to preempt than a

			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {

	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
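/*
 * Illustrative summary of the selection order above (pseudo-C):
 *
 *	if (cpumask_test_cpu(task_cpu(task), lowest_mask))
 *		return task_cpu(task);	// 1) cache-hot last CPU
 *	...				// 2) a CPU in an SD_WAKE_AFFINE
 *					//    domain shared with this_cpu
 *	return cpumask_any(lowest_mask);// 3) any compatible CPU, or -1
 */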
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)

	struct rq *lowest_rq = NULL;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))

		lowest_rq = cpu_rq(cpu);

		if (lowest_rq->rt.highest_prio.curr <= task->prio) {

			 * Target rq has tasks of equal or higher priority,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {

			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.

			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !task_on_rq_queued(task))) {

				double_unlock_balance(rq, lowest_rq);

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)

		double_unlock_balance(rq, lowest_rq);

static struct task_struct *pick_next_pushable_task(struct rq *rq)

	struct task_struct *p;

	if (!has_pushable_tasks(rq))

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!rt_task(p));

 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.

static int push_rt_task(struct rq *rq)

	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)

	next_task = pick_next_pushable_task(rq);

	if (unlikely(next_task == rq->curr)) {

	 * It's possible that next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.

	if (unlikely(next_task->prio < rq->curr->prio)) {

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);

		struct task_struct *task;

		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for

		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {

			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to. Do not retry in this case, since
			 * other cpus will pull from us when ready.

			/* No more tasks, just exit */

		 * Something has shifted, try again.

		put_task_struct(next_task);

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_curr(lowest_rq);

	double_unlock_balance(rq, lowest_rq);

	put_task_struct(next_task);

static void push_rt_tasks(struct rq *rq)

	/* push_rt_task will return true if it moved an RT task */
	while (push_rt_task(rq))

#ifdef HAVE_RT_PUSH_IPI

 * The search for the next cpu always starts at rq->cpu and ends
 * when we reach rq->cpu again. It will never return rq->cpu.
 * This returns the next cpu to check, or nr_cpu_ids if the loop
 *
 * rq->rt.push_cpu holds the last cpu returned by this function,
 * or if this is the first instance, it must hold rq->cpu.

static int rto_next_cpu(struct rq *rq)

	int prev_cpu = rq->rt.push_cpu;

	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);

	 * If the previous cpu is less than the rq's CPU, then it already
	 * passed the end of the mask, and has started from the beginning.
	 * We end if the next CPU is greater or equal to rq's CPU.

	if (prev_cpu < rq->cpu) {

	} else if (cpu >= nr_cpu_ids) {

		 * We passed the end of the mask, start at the beginning.
		 * If the result is greater or equal to the rq's CPU, then
		 * the loop is finished.

		cpu = cpumask_first(rq->rd->rto_mask);

	rq->rt.push_cpu = cpu;

	/* Return cpu to let the caller know if the loop is finished or not */

static int find_next_push_cpu(struct rq *rq)

		cpu = rto_next_cpu(rq);
		if (cpu >= nr_cpu_ids)

		next_rq = cpu_rq(cpu);

		/* Make sure the next rq can push to this rq */
		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
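/*
 * Worked example (illustrative): on a 4-CPU system with rq->cpu = 2
 * and rto_mask = {0,1,3}, successive rto_next_cpu() calls return 3,
 * then wrap around to 0, then 1, and finally a value >= nr_cpu_ids to
 * end the scan -- rq->cpu itself is never returned.
 */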
#define RT_PUSH_IPI_EXECUTING		1
#define RT_PUSH_IPI_RESTART		2

static void tell_cpu_to_push(struct rq *rq)

	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
		raw_spin_lock(&rq->rt.push_lock);
		/* Make sure it's still executing */
		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {

			 * Tell the IPI to restart the loop as things have
			 * changed since it started.

			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
			raw_spin_unlock(&rq->rt.push_lock);

		raw_spin_unlock(&rq->rt.push_lock);

	/* When here, there's no IPI going around */

	rq->rt.push_cpu = rq->cpu;
	cpu = find_next_push_cpu(rq);
	if (cpu >= nr_cpu_ids)

	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;

	irq_work_queue_on(&rq->rt.push_work, cpu);

/* Called from hardirq context */
static void try_to_push_tasks(void *arg)

	struct rt_rq *rt_rq = arg;
	struct rq *rq, *src_rq;

	this_cpu = rt_rq->push_cpu;

	/* Paranoid check */
	BUG_ON(this_cpu != smp_processor_id());

	rq = cpu_rq(this_cpu);
	src_rq = rq_of_rt_rq(rt_rq);

	if (has_pushable_tasks(rq)) {
		raw_spin_lock(&rq->lock);

		raw_spin_unlock(&rq->lock);

	/* Pass the IPI to the next rt overloaded queue */
	raw_spin_lock(&rt_rq->push_lock);

	 * If the source queue changed since the IPI went out,
	 * we need to restart the search from that CPU again.

	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
		rt_rq->push_cpu = src_rq->cpu;

	cpu = find_next_push_cpu(src_rq);

	if (cpu >= nr_cpu_ids)
		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
	raw_spin_unlock(&rt_rq->push_lock);

	if (cpu >= nr_cpu_ids)

	 * It is possible that a restart caused this CPU to be
	 * chosen again. Don't bother with an IPI, just see if we
	 * have more to push.

	if (unlikely(cpu == rq->cpu))

	/* Try the next RT overloaded CPU */
	irq_work_queue_on(&rt_rq->push_work, cpu);

static void push_irq_work_func(struct irq_work *work)

	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);

	try_to_push_tasks(rt_rq);

#endif /* HAVE_RT_PUSH_IPI */

static int pull_rt_task(struct rq *this_rq)

	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;

	if (likely(!rt_overloaded(this_rq)))

	 * Match the barrier from rt_set_overload(); this guarantees that if we
	 * see overloaded we must also see the rto_mask bit.

#ifdef HAVE_RT_PUSH_IPI
	if (sched_feat(RT_PUSH_IPI)) {
		tell_cpu_to_push(this_rq);

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)

		src_rq = cpu_rq(cpu);

		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care

		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)

		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could

		double_lock_balance(this_rq, src_rq);

		 * We can pull only a task which is pushable
		 * on its rq, and no others.

		p = pick_highest_pushable_task(src_rq, this_cpu);

		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?

		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue

			if (p->prio < src_rq->curr->prio)

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood

		double_unlock_balance(this_rq, src_rq);

static void post_schedule_rt(struct rq *rq)

 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now

static void task_woken_rt(struct rq *rq, struct task_struct *p)

	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     rq->curr->prio <= p->prio))

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)

	BUG_ON(!rt_task(p));

	if (!task_on_rq_queued(p))

	weight = cpumask_weight(new_mask);

	 * Only update if the process changes whether or not it
	 * can migrate.

	if ((p->nr_cpus_allowed > 1) == (weight > 1))

	 * The process used to be able to migrate OR it can now migrate

		if (!task_current(rq, p))
			dequeue_pushable_task(rq, p);
		BUG_ON(!rq->rt.rt_nr_migratory);
		rq->rt.rt_nr_migratory--;

		if (!task_current(rq, p))
			enqueue_pushable_task(rq, p);
		rq->rt.rt_nr_migratory++;

	update_rt_migration(&rq->rt);

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)

	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)

	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);

 * When switching from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.

static void switched_from_rt(struct rq *rq, struct task_struct *p)

	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks

	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)

	if (pull_rt_task(rq))

void __init init_sched_rt_class(void)

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));

#endif /* CONFIG_SMP */

 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to

static void switched_to_rt(struct rq *rq, struct task_struct *p)

	int check_resched = 1;

	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.

	if (task_on_rq_queued(p) && rq->curr != p) {

		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
		    /* Don't resched if we changed runqueues */
		    push_rt_task(rq) && rq != task_rq(p))

#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)

 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.

prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)

	if (!task_on_rq_queued(p))

	if (rq->curr == p) {

		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.

		if (oldprio < p->prio)

		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.

		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)

		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)

#endif /* CONFIG_SMP */

		 * This task is not running, but if it is
		 * higher in priority than the current running task

		if (p->prio < rq->curr->prio)

static void watchdog(struct rq *rq, struct task_struct *p)

	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {

		if (p->rt.watchdog_stamp != jiffies) {

			p->rt.watchdog_stamp = jiffies;

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
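/*
 * Illustrative userspace side (assumption-level): a task opts in to
 * this watchdog via RLIMIT_RTTIME, specified in microseconds of RT
 * runtime between sleeps, e.g.
 *
 *	struct rlimit rl = {
 *		.rlim_cur = 500000,	// soft: 0.5s, then SIGXCPU
 *		.rlim_max = 1000000,	// hard: 1s, then SIGKILL
 *	};
 *	setrlimit(RLIMIT_RTTIME, &rl);
 *
 * p->rt.timeout ticks up here; the posix-cpu-timers code compares it
 * against these limits and delivers the signals.
 */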
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)

	struct sched_rt_entity *rt_se = &p->rt;

	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.

	if (p->policy != SCHED_RR)

	if (--p->rt.time_slice)

	p->rt.time_slice = sched_rr_timeslice;

	 * Requeue to the end of queue if we (and all of our ancestors) are not
	 * the only element on the queue

	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
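/*
 * Illustrative numbers: sched_rr_timeslice defaults to RR_TIMESLICE
 * (100ms expressed in ticks) and is tunable through
 * /proc/sys/kernel/sched_rr_timeslice_ms, so a SCHED_RR task runs for
 * at most ~100ms before being requeued behind its priority peers.
 */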
static void set_curr_task_rt(struct rq *rq)

	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)

	 * Time slice is 0 for SCHED_FIFO tasks

	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,

	.update_curr		= update_curr_rt,

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

void print_rt_stats(struct seq_file *m, int cpu)

	struct rt_rq *rt_rq;

	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);

#endif /* CONFIG_SCHED_DEBUG */