These changes are the raw update to linux-4.4.6-rt14. Kernel sources
diff --git a/kernel/kernel/rcu/update.c b/kernel/kernel/rcu/update.c
index 1718c4f..9a39046 100644
--- a/kernel/kernel/rcu/update.c
+++ b/kernel/kernel/rcu/update.c
@@ -62,6 +62,55 @@ MODULE_ALIAS("rcupdate");
 
 module_param(rcu_expedited, int, 0);
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
+/**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In the absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.  This is useful for debug checks in functions
+ * that require that they be called within an RCU-sched read-side
+ * critical section.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (i.e. that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_sched_held() returns false even if
+ * the CPU did an rcu_read_lock_sched().  The reason for this is that RCU
+ * ignores CPUs that are in such a section, considering these as being in
+ * an extended quiescent state, so such a CPU is effectively never in an
+ * RCU read-side critical section regardless of what RCU primitives it
+ * invokes.  This state of affairs is required --- we need to keep an
+ * RCU-free window in idle where the CPU may possibly enter into low power
+ * mode. This way, CPUs that started a grace period can observe our
+ * extended quiescent state. Otherwise we would delay any grace period for
+ * as long as we run in the idle task.
+ *
+ * Similarly, we avoid claiming an RCU-sched read lock held if the
+ * current CPU is offline.
+ */
+int rcu_read_lock_sched_held(void)
+{
+       int lockdep_opinion = 0;
+
+       if (!debug_lockdep_rcu_enabled())
+               return 1;
+       if (!rcu_is_watching())
+               return 0;
+       if (!rcu_lockdep_current_cpu_online())
+               return 0;
+       if (debug_locks)
+               lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+       return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
+}
+EXPORT_SYMBOL(rcu_read_lock_sched_held);
+#endif
+
 #ifndef CONFIG_TINY_RCU
 
 static atomic_t rcu_expedited_nesting =
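
For reference, the usual consumer of rcu_read_lock_sched_held() is a lockdep debug check in code that must run with preemption disabled or under rcu_read_lock_sched(). A minimal sketch of such a caller follows; the helper name and message are illustrative, not part of this patch:

    /* Illustrative sketch, not part of the patch. */
    #include <linux/rcupdate.h>

    /* Hypothetical helper that must be called from within an RCU-sched
     * read-side critical section (preemption or irqs disabled). */
    static void example_touch_percpu_state(void)
    {
            RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                             "example_touch_percpu_state() needs rcu_read_lock_sched()");
            /* ... access data protected by RCU-sched ... */
    }

rcu_dereference_sched() performs essentially the same check internally when lockdep is enabled, so open-coding it as above is mainly useful in functions that do not dereference an RCU-protected pointer themselves.
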
@@ -150,14 +199,14 @@ void __rcu_read_unlock(void)
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
                barrier();  /* assign before ->rcu_read_unlock_special load */
-               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))
+               if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
 #ifdef CONFIG_PROVE_LOCKING
        {
-               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+               int rrln = READ_ONCE(t->rcu_read_lock_nesting);
 
                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
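
Most of the remaining hunks are the mechanical ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversion: ACCESS_ONCE() relies on a volatile cast and is limited to scalar types (some GCC versions miscompiled volatile accesses to aggregates), while READ_ONCE()/WRITE_ONCE() make the direction of the access explicit. A small, self-contained illustration of the pattern, with a made-up flag and helpers:

    /* Illustrative sketch, not part of the patch. */
    #include <linux/compiler.h>

    static int example_flag;

    /* Writer: store the value with a single, non-torn access. */
    static void example_set_flag(int val)
    {
            WRITE_ONCE(example_flag, val);
    }

    /* Reader: force exactly one load; the compiler may not fuse,
     * re-read, or cache this access. */
    static int example_read_flag(void)
    {
            return READ_ONCE(example_flag);
    }
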
@@ -271,20 +320,37 @@ void wakeme_after_rcu(struct rcu_head *head)
        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
 }
+EXPORT_SYMBOL_GPL(wakeme_after_rcu);
 
-void wait_rcu_gp(call_rcu_func_t crf)
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+                  struct rcu_synchronize *rs_array)
 {
-       struct rcu_synchronize rcu;
+       int i;
 
-       init_rcu_head_on_stack(&rcu.head);
-       init_completion(&rcu.completion);
-       /* Will wake me after RCU finished. */
-       crf(&rcu.head, wakeme_after_rcu);
-       /* Wait for it. */
-       wait_for_completion(&rcu.completion);
-       destroy_rcu_head_on_stack(&rcu.head);
+       /* Initialize and register callbacks for each flavor specified. */
+       for (i = 0; i < n; i++) {
+               if (checktiny &&
+                   (crcu_array[i] == call_rcu ||
+                    crcu_array[i] == call_rcu_bh)) {
+                       might_sleep();
+                       continue;
+               }
+               init_rcu_head_on_stack(&rs_array[i].head);
+               init_completion(&rs_array[i].completion);
+               (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
+       }
+
+       /* Wait for all callbacks to be invoked. */
+       for (i = 0; i < n; i++) {
+               if (checktiny &&
+                   (crcu_array[i] == call_rcu ||
+                    crcu_array[i] == call_rcu_bh))
+                       continue;
+               wait_for_completion(&rs_array[i].completion);
+               destroy_rcu_head_on_stack(&rs_array[i].head);
+       }
 }
-EXPORT_SYMBOL_GPL(wait_rcu_gp);
+EXPORT_SYMBOL_GPL(__wait_rcu_gp);
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
 void init_rcu_head(struct rcu_head *head)
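
The new __wait_rcu_gp() posts one callback per requested RCU flavor and then waits for all of them, using caller-supplied rcu_synchronize slots; with checktiny set it skips flavors that are trivially quiescent under Tiny RCU. The matching rcupdate.h change (not shown in this hunk) is expected to route wait_rcu_gp() and synchronize_rcu_mult() through this helper. A hedged sketch of a direct caller that waits for both an RCU and an RCU-sched grace period, purely for illustration:

    /* Illustrative sketch, not part of the patch. */
    #include <linux/kernel.h>
    #include <linux/rcupdate.h>

    static void example_wait_two_flavors(void)
    {
            call_rcu_func_t crcu_array[] = { call_rcu, call_rcu_sched };
            struct rcu_synchronize rs_array[ARRAY_SIZE(crcu_array)];

            /* Block until one grace period of each flavor has elapsed. */
            __wait_rcu_gp(false, ARRAY_SIZE(crcu_array), crcu_array, rs_array);
    }

In practice callers would go through the synchronize_rcu_mult(call_rcu, call_rcu_sched) style wrappers rather than building the arrays by hand.
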
@@ -391,17 +457,17 @@ module_param(rcu_cpu_stall_timeout, int, 0644);
 
 int rcu_jiffies_till_stall_check(void)
 {
-       int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
+       int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
 
        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+               WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+               WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
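
As a worked example of the clamp above: setting rcupdate.rcu_cpu_stall_timeout=500 on the kernel command line is limited to 300, so the stall check fires after 300 * HZ jiffies (five minutes' worth of ticks) plus the RCU_STALL_DELAY_DELTA fudge factor. The out-of-range value is also written back with WRITE_ONCE(), so later readers of rcu_cpu_stall_timeout see the effective setting.
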
@@ -470,7 +536,7 @@ static void rcu_spawn_tasks_kthread(void);
  * Post an RCU-tasks callback.  First call must be from process context
  * after the scheduler is fully operational.
  */
-void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
+void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
        unsigned long flags;
        bool needwake;
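
The prototype change here just adopts the rcu_callback_t typedef used by the rest of the RCU API; the behaviour of call_rcu_tasks() is unchanged. A hedged sketch of a caller, with a made-up structure and callback:

    /* Illustrative sketch, not part of the patch. */
    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_obj {
            int payload;
            struct rcu_head rh;
    };

    /* Runs after a tasks-RCU grace period (roughly: after every task has
     * done a voluntary context switch or run in user space). */
    static void example_obj_free(struct rcu_head *rhp)
    {
            kfree(container_of(rhp, struct example_obj, rh));
    }

    static void example_obj_retire(struct example_obj *obj)
    {
            call_rcu_tasks(&obj->rh, example_obj_free);
    }
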
@@ -525,8 +591,8 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
 void synchronize_rcu_tasks(void)
 {
        /* Complain if the scheduler has not started.  */
-       rcu_lockdep_assert(!rcu_scheduler_active,
-                          "synchronize_rcu_tasks called too soon");
+       RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+                        "synchronize_rcu_tasks called too soon");
 
        /* Wait for the grace period. */
        wait_rcu_gp(call_rcu_tasks);
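
Note that the rcu_lockdep_assert() to RCU_LOCKDEP_WARN() rename inverts the sense of the condition: the old macro complained when its condition was false, the new one complains when its condition is true, so call sites normally have their condition negated during conversion. Schematically, with an illustrative check that is not taken from this file:

    /* Illustrative sketch, not part of the patch. */
    #include <linux/rcupdate.h>

    static void example_check_rcu_reader(void)
    {
            /* Old style: splat if the asserted condition does NOT hold. */
            /* rcu_lockdep_assert(rcu_read_lock_held(), "need rcu_read_lock()"); */

            /* New style: splat if the suspicious condition DOES hold. */
            RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "need rcu_read_lock()");
    }
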
@@ -552,12 +618,12 @@ static void check_holdout_task(struct task_struct *t,
 {
        int cpu;
 
-       if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
-           t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
-           !ACCESS_ONCE(t->on_rq) ||
+       if (!READ_ONCE(t->rcu_tasks_holdout) ||
+           t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
+           !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
-               ACCESS_ONCE(t->rcu_tasks_holdout) = false;
+               WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
@@ -641,11 +707,11 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                 */
                rcu_read_lock();
                for_each_process_thread(g, t) {
-                       if (t != current && ACCESS_ONCE(t->on_rq) &&
+                       if (t != current && READ_ONCE(t->on_rq) &&
                            !is_idle_task(t)) {
                                get_task_struct(t);
-                               t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
-                               ACCESS_ONCE(t->rcu_tasks_holdout) = true;
+                               t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+                               WRITE_ONCE(t->rcu_tasks_holdout, true);
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
@@ -674,7 +740,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                        struct task_struct *t1;
 
                        schedule_timeout_interruptible(HZ);
-                       rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+                       rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
                        if (needreport)
@@ -730,7 +796,7 @@ static void rcu_spawn_tasks_kthread(void)
        static struct task_struct *rcu_tasks_kthread_ptr;
        struct task_struct *t;
 
-       if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
+       if (READ_ONCE(rcu_tasks_kthread_ptr)) {
                smp_mb(); /* Ensure caller sees full kthread. */
                return;
        }
@@ -742,7 +808,7 @@ static void rcu_spawn_tasks_kthread(void)
        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        BUG_ON(IS_ERR(t));
        smp_mb(); /* Ensure others see full kthread. */
-       ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
+       WRITE_ONCE(rcu_tasks_kthread_ptr, t);
        mutex_unlock(&rcu_tasks_kthread_mutex);
 }