These changes are the raw update to the linux-4.4.6-rt14 kernel sources in kvmfornfv.git.
diff --git a/kernel/kernel/context_tracking.c b/kernel/kernel/context_tracking.c
index 72d59a1..d8560ee 100644
@@ -30,12 +30,23 @@ EXPORT_SYMBOL_GPL(context_tracking_enabled);
 DEFINE_PER_CPU(struct context_tracking, context_tracking);
 EXPORT_SYMBOL_GPL(context_tracking);
 
-void context_tracking_cpu_set(int cpu)
+static bool context_tracking_recursion_enter(void)
 {
-       if (!per_cpu(context_tracking.active, cpu)) {
-               per_cpu(context_tracking.active, cpu) = true;
-               static_key_slow_inc(&context_tracking_enabled);
-       }
+       int recursion;
+
+       recursion = __this_cpu_inc_return(context_tracking.recursion);
+       if (recursion == 1)
+               return true;
+
+       WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
+       __this_cpu_dec(context_tracking.recursion);
+
+       return false;
+}
+
+static void context_tracking_recursion_exit(void)
+{
+       __this_cpu_dec(context_tracking.recursion);
 }
 
 /**
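
The per-CPU recursion counter introduced above is the heart of the patch: the first caller on a CPU bumps the counter to 1 and proceeds, while any nested call that re-enters the update (say, via an exception taken mid-transition) sees a larger value and backs out. A minimal user-space sketch of the same guard pattern follows; the plain counter and the function names stand in for the kernel's __this_cpu ops and are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the per-CPU counter; the kernel uses
     * __this_cpu_inc_return()/__this_cpu_dec() instead. */
    static int recursion;

    static bool recursion_enter(void)
    {
            /* First caller sees 1 and wins; nested callers back out. */
            if (++recursion == 1)
                    return true;
            recursion--;
            return false;
    }

    static void recursion_exit(void)
    {
            recursion--;
    }

    static void update_state(void)
    {
            if (!recursion_enter())
                    return; /* nested: an update is already in progress */
            /* ... perform the non-reentrant state transition ... */
            recursion_exit();
    }

    int main(void)
    {
            update_state();
            printf("counter back to %d\n", recursion);
            return 0;
    }
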
@@ -47,34 +58,14 @@ void context_tracking_cpu_set(int cpu)
  * instructions to execute won't use any RCU read side critical section
  * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_enter(enum ctx_state state)
+void __context_tracking_enter(enum ctx_state state)
 {
-       unsigned long flags;
-
-       /*
-        * Repeat the user_enter() check here because some archs may be calling
-        * this from asm and if no CPU needs context tracking, they shouldn't
-        * go further. Repeat the check here until they support the inline static
-        * key check.
-        */
-       if (!context_tracking_is_enabled())
-               return;
-
-       /*
-        * Some contexts may involve an exception occurring in an irq,
-        * leading to that nesting:
-        * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-        * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-        * helpers are enough to protect RCU uses inside the exception. So
-        * just return immediately if we detect we are in an IRQ.
-        */
-       if (in_interrupt())
-               return;
-
        /* Kernel threads aren't supposed to go to userspace */
        WARN_ON_ONCE(!current->mm);
 
-       local_irq_save(flags);
+       if (!context_tracking_recursion_enter())
+               return;
+
        if ( __this_cpu_read(context_tracking.state) != state) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
@@ -105,6 +96,28 @@ void context_tracking_enter(enum ctx_state state)
                 */
                __this_cpu_write(context_tracking.state, state);
        }
+       context_tracking_recursion_exit();
+}
+NOKPROBE_SYMBOL(__context_tracking_enter);
+EXPORT_SYMBOL_GPL(__context_tracking_enter);
+
+void context_tracking_enter(enum ctx_state state)
+{
+       unsigned long flags;
+
+       /*
+        * Some contexts may involve an exception occurring in an irq,
+        * leading to that nesting:
+        * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+        * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+        * helpers are enough to protect RCU uses inside the exception. So
+        * just return immediately if we detect we are in an IRQ.
+        */
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+       __context_tracking_enter(state);
        local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_enter);
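
Splitting the old monolithic context_tracking_enter() into an outer wrapper and an inner __context_tracking_enter() lets callers that already run with interrupts disabled skip the redundant local_irq_save()/restore(). One such caller in this era is KVM's guest_enter(), which runs with IRQs off around guest entry; a rough sketch of its shape (simplified from the 4.4-era include/linux/context_tracking.h, with the vtime-accounting branch omitted, so treat the exact body as an assumption):

    /* Rough shape of the 4.4-era guest_enter(): KVM invokes it with
     * IRQs already disabled, so it can call the __-variant directly. */
    static inline void guest_enter(void)
    {
            current->flags |= PF_VCPU;

            if (context_tracking_is_enabled())
                    __context_tracking_enter(CONTEXT_GUEST);
    }

The EXPORT_SYMBOL_GPL on the __-variants is what makes this usable from a modular KVM build.
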
@@ -112,7 +125,7 @@ EXPORT_SYMBOL_GPL(context_tracking_enter);
 
 void context_tracking_user_enter(void)
 {
-       context_tracking_enter(CONTEXT_USER);
+       user_enter();
 }
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
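
context_tracking_user_enter() now simply bounces through user_enter(): the context_tracking_is_enabled() test that the old body repeated has moved into the inline header wrapper, so architectures still calling the out-of-line symbol from asm get the same static-key fast path as everyone else. From memory, the 4.4-era wrappers look roughly like this (a sketch, not a verbatim quote):

    static inline void user_enter(void)
    {
            if (context_tracking_is_enabled())
                    context_tracking_enter(CONTEXT_USER);
    }

    static inline void user_exit(void)
    {
            if (context_tracking_is_enabled())
                    context_tracking_exit(CONTEXT_USER);
    }
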
@@ -128,17 +141,11 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_exit(enum ctx_state state)
+void __context_tracking_exit(enum ctx_state state)
 {
-       unsigned long flags;
-
-       if (!context_tracking_is_enabled())
+       if (!context_tracking_recursion_enter())
                return;
 
-       if (in_interrupt())
-               return;
-
-       local_irq_save(flags);
        if (__this_cpu_read(context_tracking.state) == state) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
@@ -153,6 +160,20 @@ void context_tracking_exit(enum ctx_state state)
                }
                __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
        }
+       context_tracking_recursion_exit();
+}
+NOKPROBE_SYMBOL(__context_tracking_exit);
+EXPORT_SYMBOL_GPL(__context_tracking_exit);
+
+void context_tracking_exit(enum ctx_state state)
+{
+       unsigned long flags;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+       __context_tracking_exit(state);
        local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_exit);
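
The re-entrancy promised in the comment above is what lets an exception handler bracket its work with these helpers without knowing whether it interrupted user or kernel mode. The usual pattern pairs exception_enter(), which returns the interrupted state, with exception_exit(), which restores it; the handler below is a hedged sketch of that pattern (the handler name is hypothetical, the bracketing matches what arch fault handlers of this era do):

    /* Typical bracketing in a fault handler: prev_state records
     * whatever context the exception interrupted. */
    void some_fault_handler(void)
    {
            enum ctx_state prev_state;

            prev_state = exception_enter(); /* leave user context if needed */
            /* ... handle the fault; RCU is watching here ... */
            exception_exit(prev_state);     /* restore the interrupted state */
    }
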
@@ -160,28 +181,30 @@ EXPORT_SYMBOL_GPL(context_tracking_exit);
 
 void context_tracking_user_exit(void)
 {
-       context_tracking_exit(CONTEXT_USER);
+       user_exit();
 }
 NOKPROBE_SYMBOL(context_tracking_user_exit);
 
-/**
- * __context_tracking_task_switch - context switch the syscall callbacks
- * @prev: the task that is being switched out
- * @next: the task that is being switched in
- *
- * The context tracking uses the syscall slow path to implement its user-kernel
- * boundaries probes on syscalls. This way it doesn't impact the syscall fast
- * path on CPUs that don't do context tracking.
- *
- * But we need to clear the flag on the previous task because it may later
- * migrate to some CPU that doesn't do the context tracking. As such the TIF
- * flag may not be desired there.
- */
-void __context_tracking_task_switch(struct task_struct *prev,
-                                   struct task_struct *next)
+void __init context_tracking_cpu_set(int cpu)
 {
-       clear_tsk_thread_flag(prev, TIF_NOHZ);
-       set_tsk_thread_flag(next, TIF_NOHZ);
+       static __initdata bool initialized = false;
+
+       if (!per_cpu(context_tracking.active, cpu)) {
+               per_cpu(context_tracking.active, cpu) = true;
+               static_key_slow_inc(&context_tracking_enabled);
+       }
+
+       if (initialized)
+               return;
+
+       /*
+        * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork.
+        * This assumes that init is the only task at this early boot stage.
+        */
+       set_tsk_thread_flag(&init_task, TIF_NOHZ);
+       WARN_ON_ONCE(!tasklist_empty());
+
+       initialized = true;
 }
 
 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
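
The final hunk replaces the old per-context-switch TIF_NOHZ set/clear with a one-time setup: plant the flag on init before any other task exists and let fork's thread-flag inheritance carry it to every descendant. A self-contained sketch of that inheritance (plain C with an illustrative flag bit; none of these names are kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    #define TIF_NOHZ (1UL << 0)             /* illustrative bit value */

    struct task {
            unsigned long flags;
    };

    /* Thread flags are copied from parent to child at fork time, so a
     * flag set on init before any other task exists reaches every
     * future task with no per-switch work. */
    static struct task *fork_task(const struct task *parent)
    {
            struct task *child = malloc(sizeof(*child));

            child->flags = parent->flags;   /* TIF_NOHZ rides along */
            return child;
    }

    int main(void)
    {
            struct task init_task = { .flags = TIF_NOHZ }; /* set once at boot */
            struct task *child = fork_task(&init_task);
            struct task *grandchild = fork_task(child);

            printf("grandchild has TIF_NOHZ: %s\n",
                   (grandchild->flags & TIF_NOHZ) ? "yes" : "no");
            free(child);
            free(grandchild);
            return 0;
    }
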