These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.

diff --git a/kernel/kernel/softirq.c b/kernel/kernel/softirq.c
index 49baf81..cb9c1d5 100644
--- a/kernel/kernel/softirq.c
+++ b/kernel/kernel/softirq.c
@@ -58,6 +58,10 @@ EXPORT_SYMBOL(irq_stat);
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
+DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
+#endif
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
@@ -171,6 +175,17 @@ static void wakeup_softirqd(void)
                wake_up_process(tsk);
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void wakeup_timer_softirqd(void)
+{
+       /* Interrupts are disabled: no need to stop preemption */
+       struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
+
+       if (tsk && tsk->state != TASK_RUNNING)
+               wake_up_process(tsk);
+}
+#endif
+
 static void handle_softirq(unsigned int vec_nr)
 {
        struct softirq_action *h = softirq_vec + vec_nr;
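
wakeup_timer_softirqd() mirrors the existing wakeup_softirqd(): wake the per-CPU thread only if it is not already runnable, and rely on the caller holding interrupts off so the __this_cpu_read() needs no extra preemption protection. A runnable model of that guard (the task structure and state names below are userspace stand-ins, not kernel API):

    #include <stdio.h>

    enum task_state { TASK_RUNNING, TASK_INTERRUPTIBLE };

    struct task { const char *comm; enum task_state state; };

    static void wake_up_process(struct task *t)
    {
        t->state = TASK_RUNNING;
        printf("woke %s\n", t->comm);
    }

    static void wakeup_thread(struct task *t)
    {
        if (t && t->state != TASK_RUNNING)  /* skip redundant wakeups */
            wake_up_process(t);
    }

    int main(void)
    {
        struct task ktimersoftd = { "ktimersoftd/0", TASK_INTERRUPTIBLE };

        wakeup_thread(&ktimersoftd);        /* wakes the thread */
        wakeup_thread(&ktimersoftd);        /* already running: no-op */
        return 0;
    }
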
@@ -272,9 +287,9 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 
        if (preempt_count() == cnt) {
 #ifdef CONFIG_DEBUG_PREEMPT
-               current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
+               current->preempt_disable_ip = get_lock_parent_ip();
 #endif
-               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+               trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
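
Independent of the thread split, this hunk adopts the upstream get_lock_parent_ip() helper, which records the first return address outside the locking internals so preempt_disable_ip points at the real caller rather than a lock wrapper. A rough, runnable approximation, with in_lock_functions() stubbed out (the kernel variant checks the spinlock text section and falls back through CALLER_ADDR1 and CALLER_ADDR2):

    #include <stdio.h>

    #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))

    static int in_lock_functions(unsigned long addr)
    {
        (void)addr;
        return 0;   /* userspace stand-in: nothing counts as lock code */
    }

    static __attribute__((noinline)) unsigned long get_lock_parent_ip(void)
    {
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
            return addr;
        return 0;   /* kernel version tries CALLER_ADDR1, then CALLER_ADDR2 */
    }

    int main(void)
    {
        printf("recorded ip: %#lx\n", get_lock_parent_ip());
        return 0;
    }
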
@@ -473,7 +488,6 @@ void __raise_softirq_irqoff(unsigned int nr)
 static inline void local_bh_disable_nort(void) { local_bh_disable(); }
 static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
 static void ksoftirqd_set_sched_params(unsigned int cpu) { }
-static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
 
 #else /* !PREEMPT_RT_FULL */
 
@@ -549,31 +563,21 @@ static void do_current_softirqs(void)
                        do_single_softirq(i);
                }
                softirq_clr_runner(i);
-               unlock_softirq(i);
                WARN_ON(current->softirq_nestcnt != 1);
+               local_irq_enable();
+               unlock_softirq(i);
+               local_irq_disable();
        }
 }
 
-static void __local_bh_disable(void)
+void __local_bh_disable(void)
 {
        if (++current->softirq_nestcnt == 1)
                migrate_disable();
 }
+EXPORT_SYMBOL(__local_bh_disable);
 
-void local_bh_disable(void)
-{
-       __local_bh_disable();
-}
-EXPORT_SYMBOL(local_bh_disable);
-
-void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
-{
-       __local_bh_disable();
-       if (cnt & PREEMPT_CHECK_OFFSET)
-               preempt_disable();
-}
-
-static void __local_bh_enable(void)
+void __local_bh_enable(void)
 {
        if (WARN_ON(current->softirq_nestcnt == 0))
                return;
@@ -586,25 +590,7 @@ static void __local_bh_enable(void)
        if (--current->softirq_nestcnt == 0)
                migrate_enable();
 }
-
-void local_bh_enable(void)
-{
-       __local_bh_enable();
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
-{
-       __local_bh_enable();
-       if (cnt & PREEMPT_CHECK_OFFSET)
-               preempt_enable();
-}
-
-void local_bh_enable_ip(unsigned long ip)
-{
-       local_bh_enable();
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
+EXPORT_SYMBOL(__local_bh_enable);
 
 void _local_bh_enable(void)
 {
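
On RT, bottom-half disabling is a per-task nesting count: only the outermost __local_bh_disable() pins the task with migrate_disable(), and only the matching outermost __local_bh_enable() unpins it. The thin local_bh_*() wrappers deleted above are presumably replaced by inline versions in a header elsewhere in this series, which is why the two core functions gain EXPORT_SYMBOL here. A self-contained model of the nesting invariant (migrate_disable()/migrate_enable() are stand-ins):

    #include <assert.h>
    #include <stdio.h>

    static int softirq_nestcnt;          /* per-task in the kernel */

    static void migrate_disable(void) { puts("pinned to CPU"); }
    static void migrate_enable(void)  { puts("unpinned"); }

    static void __local_bh_disable(void)
    {
        if (++softirq_nestcnt == 1)      /* only the outermost pins */
            migrate_disable();
    }

    static void __local_bh_enable(void)
    {
        assert(softirq_nestcnt > 0);     /* the kernel WARN_ONs instead */
        if (--softirq_nestcnt == 0)      /* only the outermost unpins */
            migrate_enable();
    }

    int main(void)
    {
        __local_bh_disable();            /* pins */
        __local_bh_disable();            /* nested: no effect */
        __local_bh_enable();             /* still nested */
        __local_bh_enable();             /* unpins */
        return 0;
    }
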
@@ -629,8 +615,8 @@ static void run_ksoftirqd(unsigned int cpu)
 
        do_current_softirqs();
        current->softirq_nestcnt--;
-       rcu_note_context_switch();
        local_irq_enable();
+       cond_resched_rcu_qs();
 }
 
 /*
@@ -648,8 +634,12 @@ void thread_do_softirq(void)
 
 static void do_raise_softirq_irqoff(unsigned int nr)
 {
+       unsigned int mask;
+
+       mask = 1UL << nr;
+
        trace_softirq_raise(nr);
-       or_softirq_pending(1UL << nr);
+       or_softirq_pending(mask);
 
        /*
         * If we are not in a hard interrupt and inside a bh disabled
@@ -658,16 +648,51 @@ static void do_raise_softirq_irqoff(unsigned int nr)
         * delegate it to ksoftirqd.
         */
        if (!in_irq() && current->softirq_nestcnt)
-               current->softirqs_raised |= (1U << nr);
-       else if (__this_cpu_read(ksoftirqd))
-               __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
+               current->softirqs_raised |= mask;
+       else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
+               return;
+
+       if (mask & TIMER_SOFTIRQS)
+               __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
+       else
+               __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
+}
+
+static void wakeup_proper_softirq(unsigned int nr)
+{
+       if ((1UL << nr) & TIMER_SOFTIRQS)
+               wakeup_timer_softirqd();
+       else
+               wakeup_softirqd();
 }
 
+
 void __raise_softirq_irqoff(unsigned int nr)
 {
        do_raise_softirq_irqoff(nr);
        if (!in_irq() && !current->softirq_nestcnt)
-               wakeup_softirqd();
+               wakeup_proper_softirq(nr);
+}
+
+/*
+ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
+ */
+void __raise_softirq_irqoff_ksoft(unsigned int nr)
+{
+       unsigned int mask;
+
+       if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
+                        !__this_cpu_read(ktimer_softirqd)))
+               return;
+       mask = 1UL << nr;
+
+       trace_softirq_raise(nr);
+       or_softirq_pending(mask);
+       if (mask & TIMER_SOFTIRQS)
+               __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
+       else
+               __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
+       wakeup_proper_softirq(nr);
 }
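
This is the core of the split: a raised softirq is still ORed into the global pending mask, but the per-thread softirqs_raised bookkeeping now routes timer-class bits to ktimersoftd and everything else to ksoftirqd, with wakeup_proper_softirq() waking whichever thread owns the bit. The early return when either per-CPU thread pointer is still NULL covers the boot window; the setup callbacks further down take over bits raised before the threads existed. A compact userspace model of the routing (the thread structures are hypothetical stand-ins):

    #include <stdio.h>

    enum { TIMER_SOFTIRQ = 1, NET_RX_SOFTIRQ = 3, HRTIMER_SOFTIRQ = 8 };
    #define TIMER_SOFTIRQS ((1U << TIMER_SOFTIRQ) | (1U << HRTIMER_SOFTIRQ))

    struct softirq_thread { const char *comm; unsigned int softirqs_raised; };

    static struct softirq_thread ksoftirqd   = { "ksoftirqd/0", 0 };
    static struct softirq_thread ktimersoftd = { "ktimersoftd/0", 0 };

    static void raise_softirq(unsigned int nr)
    {
        unsigned int mask = 1U << nr;
        struct softirq_thread *t =
            (mask & TIMER_SOFTIRQS) ? &ktimersoftd : &ksoftirqd;

        t->softirqs_raised |= mask;      /* mark pending for that thread */
        printf("softirq %u -> %s (raised=%#x)\n", nr, t->comm,
               t->softirqs_raised);
    }

    int main(void)
    {
        raise_softirq(NET_RX_SOFTIRQ);   /* routed to ksoftirqd   */
        raise_softirq(TIMER_SOFTIRQ);    /* routed to ktimersoftd */
        raise_softirq(HRTIMER_SOFTIRQ);  /* routed to ktimersoftd */
        return 0;
    }

__raise_softirq_irqoff_ksoft() performs the same routing but always defers to a thread, never handling the softirq inline.
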
 
 /*
@@ -693,7 +718,7 @@ void raise_softirq_irqoff(unsigned int nr)
         * raise a WARN() if the condition is met.
         */
        if (!current->softirq_nestcnt)
-               wakeup_softirqd();
+               wakeup_proper_softirq(nr);
 }
 
 static inline int ksoftirqd_softirq_pending(void)
@@ -705,23 +730,38 @@ static inline void local_bh_disable_nort(void) { }
 static inline void _local_bh_enable_nort(void) { }
 
 static inline void ksoftirqd_set_sched_params(unsigned int cpu)
+{
+       /* Take over all but timer pending softirqs when starting */
+       local_irq_disable();
+       current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
+       local_irq_enable();
+}
+
+static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
 {
        struct sched_param param = { .sched_priority = 1 };
 
        sched_setscheduler(current, SCHED_FIFO, &param);
-       /* Take over all pending softirqs when starting */
+
+       /* Take over timer pending softirqs when starting */
        local_irq_disable();
-       current->softirqs_raised = local_softirq_pending();
+       current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
        local_irq_enable();
 }
 
-static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
+static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
+                                                   bool online)
 {
        struct sched_param param = { .sched_priority = 0 };
 
        sched_setscheduler(current, SCHED_NORMAL, &param);
 }
 
+static int ktimer_softirqd_should_run(unsigned int cpu)
+{
+       return current->softirqs_raised;
+}
+
 #endif /* PREEMPT_RT_FULL */
 /*
  * Enter an interrupt context.
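
The setup callbacks partition whatever is already pending when each thread first runs: ksoftirqd claims local_softirq_pending() & ~TIMER_SOFTIRQS, while ktimersoftd claims the timer bits and is the only one boosted to SCHED_FIFO priority 1 (its cleanup callback drops it back to SCHED_NORMAL). The sketch below shows just the partition arithmetic:

    #include <stdio.h>

    #define TIMER_SOFTIRQS ((1U << 1) | (1U << 8))   /* TIMER, HRTIMER */

    int main(void)
    {
        /* pending: TIMER (bit 1), NET_RX (bit 3), HRTIMER (bit 8) */
        unsigned int pending = (1U << 1) | (1U << 3) | (1U << 8);

        printf("ksoftirqd takes   %#x\n", pending & ~TIMER_SOFTIRQS); /* 0x8   */
        printf("ktimersoftd takes %#x\n", pending &  TIMER_SOFTIRQS); /* 0x102 */
        return 0;
    }
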
@@ -771,6 +811,9 @@ static inline void invoke_softirq(void)
        if (__this_cpu_read(ksoftirqd) &&
                        __this_cpu_read(ksoftirqd)->softirqs_raised)
                wakeup_softirqd();
+       if (__this_cpu_read(ktimer_softirqd) &&
+                       __this_cpu_read(ktimer_softirqd)->softirqs_raised)
+               wakeup_timer_softirqd();
        local_irq_restore(flags);
 #endif
 }
@@ -1203,17 +1246,30 @@ static struct notifier_block cpu_nfb = {
 static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .setup                  = ksoftirqd_set_sched_params,
-       .cleanup                = ksoftirqd_clr_sched_params,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
 };
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct smp_hotplug_thread softirq_timer_threads = {
+       .store                  = &ktimer_softirqd,
+       .setup                  = ktimer_softirqd_set_sched_params,
+       .cleanup                = ktimer_softirqd_clr_sched_params,
+       .thread_should_run      = ktimer_softirqd_should_run,
+       .thread_fn              = run_ksoftirqd,
+       .thread_comm            = "ktimersoftd/%u",
+};
+#endif
+
 static __init int spawn_ksoftirqd(void)
 {
        register_cpu_notifier(&cpu_nfb);
 
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+#ifdef CONFIG_PREEMPT_RT_FULL
+       BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
+#endif
 
        return 0;
 }
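
Both per-CPU threads share run_ksoftirqd() as their thread_fn and the same kind of should_run test on softirqs_raised; only the setup/cleanup hooks and the "ktimersoftd/%u" comm differ, so the timer thread is simply registered as a second smp_hotplug_thread at boot. A userspace model of the shared smpboot contract (single pass, parking omitted; all names are stand-ins):

    #include <stdio.h>

    struct hotplug_thread {
        const char *comm;
        unsigned int softirqs_raised;
        int  (*thread_should_run)(struct hotplug_thread *t);
        void (*thread_fn)(struct hotplug_thread *t);
    };

    static int should_run(struct hotplug_thread *t)
    {
        return t->softirqs_raised != 0;  /* mirrors ksoftirqd_should_run */
    }

    static void run_softirqs(struct hotplug_thread *t)
    {
        printf("%s handling mask %#x\n", t->comm, t->softirqs_raised);
        t->softirqs_raised = 0;
    }

    int main(void)
    {
        struct hotplug_thread threads[] = {
            { "ksoftirqd/0",   1U << 3, should_run, run_softirqs },
            { "ktimersoftd/0", 1U << 1, should_run, run_softirqs },
        };

        for (int i = 0; i < 2; i++)
            if (threads[i].thread_should_run(&threads[i]))
                threads[i].thread_fn(&threads[i]);
        return 0;
    }
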