Merge "Add the "timers: do not raise softirq unconditionally" temporarily"
author Yunhong Jiang <yunhong.jiang@intel.com>
Fri, 22 Jul 2016 17:10:32 +0000 (17:10 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Fri, 22 Jul 2016 17:10:32 +0000 (17:10 +0000)
kernel/kernel/locking/rtmutex.c
kernel/kernel/time/timer.c

diff --git a/kernel/kernel/locking/rtmutex.c b/kernel/kernel/locking/rtmutex.c
index 6697100..30777e8 100644
--- a/kernel/kernel/locking/rtmutex.c
+++ b/kernel/kernel/locking/rtmutex.c
@@ -2058,13 +2058,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
-       if (WARN_ON(in_irq() || in_nmi()))
-#else
-       if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
-#endif
-               return 0;
-
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
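
The guard removed above assumed rt_mutex_trylock() is never reached from hard
interrupt context. With the timer.c change that follows, the tick path on
PREEMPT_RT_FULL trylocks the timer base lock, which there is a sleeping,
rtmutex-backed spinlock (note the rt_spin_unlock(&base->lock) below), so
in_irq() becomes an expected caller context and the WARN_ON would trip on every
tick. A minimal userspace sketch of the contract the caller relies on instead,
using pthreads as a stand-in (tick_like_path() and defer_work() are invented
names, not kernel API):

/*
 * Minimal userspace sketch (pthreads, not kernel code): a path that must not
 * sleep only *tries* to take the lock, and a failed trylock means "defer the
 * work", not a bug worth warning about.  In the kernel hunk below the
 * fallback is raise_softirq(TIMER_SOFTIRQ).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

static void defer_work(void)
{
        printf("lock contended: defer to softirq context\n");
}

/* Stand-in for the hard timer-interrupt path, which may never block. */
static void tick_like_path(void)
{
        if (pthread_mutex_trylock(&base_lock) != 0) {
                defer_work();   /* fall back instead of blocking or warning */
                return;
        }
        /* ... inspect the timer base without ever sleeping ... */
        pthread_mutex_unlock(&base_lock);
}

int main(void)
{
        tick_like_path();
        return 0;
}
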
diff --git a/kernel/kernel/time/timer.c b/kernel/kernel/time/timer.c
index fee8682..76a301b 100644
--- a/kernel/kernel/time/timer.c
+++ b/kernel/kernel/time/timer.c
@@ -1509,8 +1509,36 @@ static void run_timer_softirq(struct softirq_action *h)
  */
 void run_local_timers(void)
 {
+       struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+
        hrtimer_run_queues();
-       raise_softirq(TIMER_SOFTIRQ);
+       /*
+        * We can access this lockless as we are in the timer
+        * interrupt. If there are no timers queued, nothing to do in
+        * the timer softirq.
+        */
+#ifdef CONFIG_PREEMPT_RT_FULL
+       if (irq_work_needs_cpu()) {
+               raise_softirq(TIMER_SOFTIRQ);
+               return;
+       }
+       if (!spin_do_trylock(&base->lock)) {
+               raise_softirq(TIMER_SOFTIRQ);
+               return;
+       }
+#endif
+       if (!base->active_timers)
+               goto out;
+
+       /* Check whether the next pending timer has expired */
+       if (time_before_eq(base->next_timer, jiffies))
+               raise_softirq(TIMER_SOFTIRQ);
+out:
+#ifdef CONFIG_PREEMPT_RT_FULL
+       rt_spin_unlock(&base->lock);
+#endif
+       /* The ; ensures that gcc won't complain in the !RT case */
+       ;
 }
 
 #ifdef __ARCH_WANT_SYS_ALARM
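
Taken together, the timer.c hunk changes run_local_timers() from raising
TIMER_SOFTIRQ on every tick to raising it only when the timer base actually
has a timer that is due, falling back to an unconditional raise on RT whenever
irq work is pending or the trylock on base->lock fails. A standalone sketch of
that per-tick decision in plain C (timer_base_sketch, tick_decision() and the
wrap-safe comparison are illustrative stand-ins for tvec_base,
run_local_timers() and time_before_eq(), not kernel code):

/*
 * Standalone sketch of the new raise/skip decision.  All names are made up
 * for illustration; "locked" models a failed trylock on base->lock under
 * CONFIG_PREEMPT_RT_FULL.
 */
#include <stdbool.h>
#include <stdio.h>

struct timer_base_sketch {
        bool locked;              /* trylock on base->lock failed */
        unsigned long active_timers;
        unsigned long next_timer; /* earliest expiry, in tick units */
};

/* Returns true when the (stand-in) timer softirq should be raised. */
static bool tick_decision(const struct timer_base_sketch *base, unsigned long now)
{
        if (base->locked)
                return true;      /* cannot inspect the base: raise to be safe */
        if (!base->active_timers)
                return false;     /* nothing queued: skip the softirq entirely */
        /* wrap-safe "next_timer <= now", like time_before_eq(next_timer, now) */
        return (long)(now - base->next_timer) >= 0;
}

int main(void)
{
        struct timer_base_sketch idle = { .locked = false, .active_timers = 0 };
        struct timer_base_sketch busy = { .locked = false, .active_timers = 1,
                                          .next_timer = 100 };

        printf("idle cpu, tick 100: raise=%d\n", tick_decision(&idle, 100)); /* 0 */
        printf("busy cpu, tick  99: raise=%d\n", tick_decision(&busy, 99));  /* 0 */
        printf("busy cpu, tick 100: raise=%d\n", tick_decision(&busy, 100)); /* 1 */
        return 0;
}
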