These changes are the raw update of the kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/kernel/sched/idle.c b/kernel/kernel/sched/idle.c
index fefcb1f..4a2ef5a 100644
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+       idle_set_state(this_rq(), idle_state);
+}
+
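
The new hook moves the "which state is this CPU in" bookkeeping out of the scheduler's idle loop and into the cpuidle core, which pairs the calls around the actual state entry. A condensed sketch of that pairing (modeled on cpuidle_enter_state() in drivers/cpuidle/cpuidle.c; timing and tracing details elided):

    /* Sketch: the cpuidle core records the planned state before entry
     * and clears it once the CPU runs again. This replaces the
     * idle_set_state() calls deleted from cpuidle_idle_call() below. */
    static int enter_state_sketch(struct cpuidle_device *dev,
                                  struct cpuidle_driver *drv, int index)
    {
            struct cpuidle_state *target_state = &drv->states[index];
            int entered_state;

            /* Take note of the planned idle state. */
            sched_idle_set_state(target_state);

            entered_state = target_state->enter(dev, drv, index);

            /* The CPU is no longer idle or about to enter idle. */
            sched_idle_set_state(NULL);

            return entered_state;
    }
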
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -48,9 +57,11 @@ static inline int cpu_idle_poll(void)
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
+       stop_critical_timings();
        while (!tif_need_resched() &&
                (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
+       start_critical_timings();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
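
The added stop_critical_timings()/start_critical_timings() pair excludes the poll loop from the irqsoff/preemptoff latency tracers: the busy-wait is intentional idle time, not a latency regression the tracers should flag. (Both helpers compile to no-ops unless CONFIG_IRQSOFF_TRACER or CONFIG_PREEMPT_TRACER is enabled.) The general pattern for any deliberate wait, sketched with a hypothetical exit condition:

    /* Sketch: hide a deliberate busy-wait from the latency tracers. */
    stop_critical_timings();
    while (!done_waiting())         /* hypothetical exit condition */
            cpu_relax();            /* be polite to SMT siblings */
    start_critical_timings();
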
@@ -67,6 +78,49 @@ void __weak arch_cpu_idle(void)
        local_irq_enable();
 }
 
+/**
+ * default_idle_call - Default CPU idle routine.
+ *
+ * Fallback for when the cpuidle framework cannot be used.
+ */
+void default_idle_call(void)
+{
+       if (current_clr_polling_and_test()) {
+               local_irq_enable();
+       } else {
+               stop_critical_timings();
+               arch_cpu_idle();
+               start_critical_timings();
+       }
+}
+
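
default_idle_call() centralizes the fallback that the deleted use_default: label at the bottom of this diff used to provide, wrapping arch_cpu_idle() in the same critical-timings bracket used above. arch_cpu_idle() itself is the __weak stub shown earlier; architectures override it with a low-power wait. A sketch of such an override (modeled loosely on the ARM version; wait_for_interrupt() is a hypothetical stand-in for the arch-specific instruction, e.g. WFI):

    /* Sketch: an arch override of the __weak arch_cpu_idle() above.
     * It is entered with interrupts disabled and must re-enable them,
     * since default_idle_call()'s callers rely on that. */
    void arch_cpu_idle(void)
    {
            wait_for_interrupt();   /* hypothetical: e.g. ARM WFI */
            local_irq_enable();
    }
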
+static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+                     int next_state)
+{
+       /* Fall back to the default arch idle method on errors. */
+       if (next_state < 0) {
+               default_idle_call();
+               return next_state;
+       }
+
+       /*
+        * The idle task needs to be rescheduled, so going idle is pointless.
+        * Record a zero residency and return.
+        */
+       if (current_clr_polling_and_test()) {
+               dev->last_residency = 0;
+               local_irq_enable();
+               return -EBUSY;
+       }
+
+       /*
+        * Enter the idle state previously selected by the governor. This
+        * call blocks until an interrupt occurs and takes care of
+        * re-enabling local interrupts before returning.
+        */
+       return cpuidle_enter(drv, dev, next_state);
+}
+
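
call_cpuidle() folds the old error handling into one place, with three outcomes for the caller: a negative next_state (governor failure) falls back to default_idle_call(), -EBUSY signals that a reschedule became pending before the state was entered, and a non-negative return is the index of the state actually entered. Passing the result straight to cpuidle_reflect() is safe because reflect is skipped for negative indices. The intended sequence, which the governor path of cpuidle_idle_call() below follows:

    /* Sketch of the select/enter/reflect cycle around call_cpuidle(). */
    next_state = cpuidle_select(drv, dev);              /* governor picks */
    entered_state = call_cpuidle(drv, dev, next_state); /* may be -EBUSY */
    cpuidle_reflect(dev, entered_state);                /* negative: no-op */
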
 /**
  * cpuidle_idle_call - the main idle function
  *
@@ -81,7 +135,6 @@ static void cpuidle_idle_call(void)
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
-       bool reflect;
 
        /*
         * Check if the idle task must be rescheduled. If it is the
@@ -92,12 +145,6 @@ static void cpuidle_idle_call(void)
                return;
        }
 
-       /*
-        * During the idle period, stop measuring the disabled irqs
-        * critical sections latencies
-        */
-       stop_critical_timings();
-
        /*
         * Tell the RCU framework we are entering an idle section,
         * so no more rcu read side critical sections and one more
@@ -105,8 +152,10 @@ static void cpuidle_idle_call(void)
         */
        rcu_idle_enter();
 
-       if (cpuidle_not_available(drv, dev))
-               goto use_default;
+       if (cpuidle_not_available(drv, dev)) {
+               default_idle_call();
+               goto exit_idle;
+       }
 
        /*
         * Suspend-to-idle ("freeze") is a system state in which all user space
@@ -124,52 +173,19 @@ static void cpuidle_idle_call(void)
                        goto exit_idle;
                }
 
-               reflect = false;
                next_state = cpuidle_find_deepest_state(drv, dev);
+               call_cpuidle(drv, dev, next_state);
        } else {
-               reflect = true;
                /*
         * Ask the cpuidle framework to choose a suitable idle state.
                 */
                next_state = cpuidle_select(drv, dev);
-       }
-       /* Fall back to the default arch idle method on errors. */
-       if (next_state < 0)
-               goto use_default;
-
-       /*
-        * The idle task must be scheduled, it is pointless to
-        * go to idle, just update no idle residency and get
-        * out of this function
-        */
-       if (current_clr_polling_and_test()) {
-               dev->last_residency = 0;
-               entered_state = next_state;
-               local_irq_enable();
-               goto exit_idle;
-       }
-
-       /* Take note of the planned idle state. */
-       idle_set_state(this_rq(), &drv->states[next_state]);
-
-       /*
-        * Enter the idle state previously returned by the governor decision.
-        * This function will block until an interrupt occurs and will take
-        * care of re-enabling the local interrupts
-        */
-       entered_state = cpuidle_enter(drv, dev, next_state);
-
-       /* The cpu is no longer idle or about to enter idle. */
-       idle_set_state(this_rq(), NULL);
-
-       if (entered_state == -EBUSY)
-               goto use_default;
-
-       /*
-        * Give the governor an opportunity to reflect on the outcome
-        */
-       if (reflect)
+               entered_state = call_cpuidle(drv, dev, next_state);
+               /*
+                * Give the governor an opportunity to reflect on the outcome
+                */
                cpuidle_reflect(dev, entered_state);
+       }
 
 exit_idle:
        __current_set_polling();
@@ -181,20 +197,6 @@ exit_idle:
                local_irq_enable();
 
        rcu_idle_exit();
-       start_critical_timings();
-       return;
-
-use_default:
-       /*
-        * We can't use the cpuidle framework, let's use the default
-        * idle routine.
-        */
-       if (current_clr_polling_and_test())
-               local_irq_enable();
-       else
-               arch_cpu_idle();
-
-       goto exit_idle;
 }
 
 DEFINE_PER_CPU(bool, cpu_dead_idle);
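
For orientation, cpuidle_select()/cpuidle_reflect() above dispatch to whichever cpuidle governor is active ("menu" or "ladder" in mainline); the suspend-to-idle branch bypasses the governor entirely via cpuidle_find_deepest_state(), which is why it never calls cpuidle_reflect(). A minimal governor skeleton against the cpuidle API of this era (illustrative only, not part of this patch):

    /* Sketch: a trivial governor that always picks the shallowest
     * state and discards feedback. */
    #include <linux/cpuidle.h>
    #include <linux/init.h>
    #include <linux/module.h>

    static int trivial_select(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev)
    {
            return 0;       /* index of the shallowest state */
    }

    static void trivial_reflect(struct cpuidle_device *dev, int index)
    {
            /* A real governor updates its prediction stats here. */
    }

    static struct cpuidle_governor trivial_governor = {
            .name    = "trivial",
            .rating  = 1,           /* low: never outranks menu/ladder */
            .select  = trivial_select,
            .reflect = trivial_reflect,
            .owner   = THIS_MODULE,
    };

    static int __init trivial_governor_init(void)
    {
            return cpuidle_register_governor(&trivial_governor);
    }
    postcore_initcall(trivial_governor_init);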