These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/kernel/irq/migration.c
index ca3f4aa..37ddb7b 100644
@@ -7,21 +7,21 @@
 void irq_move_masked_irq(struct irq_data *idata)
 {
        struct irq_desc *desc = irq_data_to_desc(idata);
-       struct irq_chip *chip = idata->chip;
+       struct irq_chip *chip = desc->irq_data.chip;
 
        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;
 
+       irqd_clr_move_pending(&desc->irq_data);
+
        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
-       if (!irqd_can_balance(&desc->irq_data)) {
+       if (irqd_is_per_cpu(&desc->irq_data)) {
                WARN_ON(1);
                return;
        }
 
-       irqd_clr_move_pending(&desc->irq_data);
-
        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
 
@@ -52,6 +52,13 @@ void irq_move_irq(struct irq_data *idata)
 {
        bool masked;
 
+       /*
+        * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
+        * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
+        * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
+        */
+       idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
+
        if (likely(!irqd_is_setaffinity_pending(idata)))
                return;
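
For readers unfamiliar with hierarchical irqdomains, the following is a minimal userspace sketch (not part of the patch; all toy_* names are invented for illustration) of the idea behind irq_desc_get_irq_data(irq_data_to_desc(idata)) in the hunk above: the descriptor embeds the top-level irq_data, so mapping any irq_data back to its descriptor and taking the descriptor's own irq_data yields the outermost one, with no #ifdef needed.

#include <stdio.h>

/*
 * Toy illustration only -- not kernel code.  It mimics the layout the
 * comment in the patch refers to: the irq descriptor embeds the
 * top-level irq_data, and in a hierarchical setup that irq_data can
 * point at a parent domain's irq_data.
 */
struct toy_irq_data {
	const char *domain_name;
	struct toy_irq_data *parent;	/* parent-domain data, if any */
};

struct toy_irq_desc {
	struct toy_irq_data irq_data;	/* top-level irq_data lives here */
};

/* analogue of irq_desc_get_irq_data(): hand back the desc's own irq_data */
static struct toy_irq_data *toy_desc_get_irq_data(struct toy_irq_desc *desc)
{
	return &desc->irq_data;
}

int main(void)
{
	struct toy_irq_data parent = { .domain_name = "parent-domain" };
	struct toy_irq_desc desc = {
		.irq_data = { .domain_name = "top-level", .parent = &parent },
	};

	/*
	 * Whichever irq_data a caller started from, the move logic wants
	 * the top-level one that the descriptor itself embeds.
	 */
	struct toy_irq_data *idata = toy_desc_get_irq_data(&desc);

	printf("operating on the %s irq_data\n", idata->domain_name);
	return 0;
}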