These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/arch/arm64/kernel/smp.c b/kernel/arch/arm64/kernel/smp.c
index d3a202b..b1adc51 100644
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -51,6 +52,7 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/virt.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
@@ -140,22 +142,27 @@ asmlinkage void secondary_start_kernel(void)
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
-       cpumask_set_cpu(cpu, mm_cpumask(mm));
 
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-       printk("CPU%u: Booted secondary processor\n", cpu);
 
        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
-       flush_tlb_all();
+       local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 
        preempt_disable();
        trace_hardirqs_off();
 
+       /*
+        * If the system has established the capabilities, make sure
+        * this CPU ticks all of those. If it doesn't, the CPU will
+        * fail to come online.
+        */
+       verify_local_cpu_capabilities();
+
        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();
 
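The verify_local_cpu_capabilities() call added above comes from the arm64 cpufeature rework: once the early CPUs have established the system-wide capability set, each late-booting secondary must prove it supports every capability in that set, or it is parked before being marked online. A minimal sketch of the idea, with hypothetical helpers (system_caps, park_this_cpu) standing in for the real code in arch/arm64/kernel/cpufeature.c:

    /* Sketch only; hypothetical helpers, not the kernel's actual code. */
    static void verify_capabilities_sketch(void)
    {
        const struct arm64_cpu_capabilities *cap;

        for (cap = system_caps; cap->matches; cap++) {
            if (!cpus_have_cap(cap->capability))
                continue;                /* not required system-wide */
            if (!cap->matches(cap))      /* this CPU lacks the feature */
                park_this_cpu();         /* never comes online */
        }
    }
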
@@ -176,6 +183,8 @@ asmlinkage void secondary_start_kernel(void)
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
+       pr_info("CPU%u: Booted secondary processor [%08x]\n",
+                                        cpu, read_cpuid_id());
        set_cpu_online(cpu, true);
        complete(&cpu_running);
 
@@ -230,12 +239,7 @@ int __cpu_disable(void)
        /*
         * OK - migrate IRQs away from this CPU
         */
-       migrate_irqs();
-
-       /*
-        * Remove this CPU from the vm mask set of all processes.
-        */
-       clear_tasks_mm_cpumask(cpu);
+       irq_migrate_all_off_this_cpu();
 
        return 0;
 }
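
Two removals fold into one replacement here. migrate_irqs() was an arm64-private copy of the IRQ evacuation logic; with CONFIG_GENERIC_IRQ_MIGRATION the core kernel supplies irq_migrate_all_off_this_cpu() (kernel/irq/cpuhotplug.c), which walks the active interrupts and retargets any that are affine to the dying CPU. clear_tasks_mm_cpumask() goes away for the same reason cpumask_set_cpu(cpu, mm_cpumask(mm)) was dropped from secondary_start_kernel() above: secondaries no longer add themselves to init_mm's cpumask, so there is nothing to clear on unplug. A rough sketch of the generic helper's core loop (migrate_one_irq() is internal to kernel/irq/cpuhotplug.c):

    void irq_migrate_all_off_this_cpu(void)    /* sketch of the shape only */
    {
        unsigned int irq;

        for_each_active_irq(irq) {
            struct irq_desc *desc = irq_to_desc(irq);

            raw_spin_lock(&desc->lock);
            migrate_one_irq(desc);             /* retarget if it points at us */
            raw_spin_unlock(&desc->lock);
        }
    }
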
@@ -248,20 +252,20 @@ static int op_cpu_kill(unsigned int cpu)
         * time and hope that it's dead, so let's skip the wait and just hope.
         */
        if (!cpu_ops[cpu]->cpu_kill)
-               return 1;
+               return 0;
 
        return cpu_ops[cpu]->cpu_kill(cpu);
 }
 
-static DECLARE_COMPLETION(cpu_died);
-
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
-       if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+       int err;
+
+       if (!cpu_wait_death(cpu, 5)) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
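
Two changes land in this hunk. op_cpu_kill()'s return convention flips: it used to return a boolean (1 meaning "assume success" when no cpu_kill method exists), and now returns 0 or a negative errno that __cpu_die() can report. The open-coded cpu_died completion is replaced by the generic CPU-death handshake from kernel/smpboot.c; cpu_wait_death(cpu, 5) keeps the old five-second timeout:

    /* The generic handshake, declared in include/linux/cpu.h: */
    bool cpu_wait_death(unsigned int cpu, int seconds); /* waiter, in __cpu_die() */
    bool cpu_report_death(void);                        /* dying CPU, in cpu_die() */
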
@@ -273,8 +277,10 @@ void __cpu_die(unsigned int cpu)
         * verify that it has really left the kernel before we consider
         * clobbering anything it might still be using.
         */
-       if (!op_cpu_kill(cpu))
-               pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+       err = op_cpu_kill(cpu);
+       if (err)
+               pr_warn("CPU%d may not have shut down cleanly: %d\n",
+                       cpu, err);
 }
 
 /*
@@ -294,7 +300,7 @@ void cpu_die(void)
        local_irq_disable();
 
        /* Tell __cpu_die() that this CPU is now safe to dispose of */
-       complete(&cpu_died);
+       (void)cpu_report_death();
 
        /*
         * Actually shutdown the CPU. This must never fail. The specific hotplug
@@ -307,67 +313,182 @@ void cpu_die(void)
 }
 #endif
 
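+/*
+ * KVM needs every CPU to have entered the kernel at the same exception
+ * level; a mix of EL1 and EL2 boots leaves the hypervisor stub unusable,
+ * so a mismatch taints the kernel as out-of-spec.
+ */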
+static void __init hyp_mode_check(void)
+{
+       if (is_hyp_mode_available())
+               pr_info("CPU: All CPU(s) started at EL2\n");
+       else if (is_hyp_mode_mismatched())
+               WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+                          "CPU: CPUs started in inconsistent modes");
+       else
+               pr_info("CPU: All CPU(s) started at EL1\n");
+}
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-       do_post_cpus_up_work();
+       setup_cpu_features();
+       hyp_mode_check();
+       apply_alternatives_all();
 }
 
 void __init smp_prepare_boot_cpu(void)
 {
+       cpuinfo_store_boot_cpu();
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
+static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+{
+       const __be32 *cell;
+       u64 hwid;
+
+       /*
+        * A cpu node with missing "reg" property is
+        * considered invalid to build a cpu_logical_map
+        * entry.
+        */
+       cell = of_get_property(dn, "reg", NULL);
+       if (!cell) {
+               pr_err("%s: missing reg property\n", dn->full_name);
+               return INVALID_HWID;
+       }
+
+       hwid = of_read_number(cell, of_n_addr_cells(dn));
+       /*
+        * Non affinity bits must be set to 0 in the DT
+        */
+       if (hwid & ~MPIDR_HWID_BITMASK) {
+               pr_err("%s: invalid reg property\n", dn->full_name);
+               return INVALID_HWID;
+       }
+       return hwid;
+}
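+
+/*
+ * For illustration, a typical DT cpu node this parses:
+ *
+ *     cpu@100 {
+ *             device_type = "cpu";
+ *             reg = <0x100>;
+ *             enable-method = "psci";
+ *     };
+ *
+ * of_read_number() folds the "reg" cells into a 64-bit value; bits outside
+ * MPIDR_HWID_BITMASK (the Aff3..Aff0 affinity fields) must be zero.
+ */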
+
+/*
+ * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
+ * entries and check for duplicates. If any is found just ignore the
+ * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
+ * matching valid MPIDR values.
+ */
+static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
+{
+       unsigned int i;
+
+       for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
+               if (cpu_logical_map(i) == hwid)
+                       return true;
+       return false;
+}
+
+/*
+ * Initialize cpu operations for a logical cpu and
+ * set it in the possible mask on success
+ */
+static int __init smp_cpu_setup(int cpu)
+{
+       if (cpu_read_ops(cpu))
+               return -ENODEV;
+
+       if (cpu_ops[cpu]->cpu_init(cpu))
+               return -ENODEV;
+
+       set_cpu_possible(cpu, true);
+
+       return 0;
+}
+
+static bool bootcpu_valid __initdata;
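+/* Logical cpu 0 is always the boot CPU, so secondary enumeration starts at 1. */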
+static unsigned int cpu_count = 1;
+
+#ifdef CONFIG_ACPI
+/*
+ * acpi_map_gic_cpu_interface - parse processor MADT entry
+ *
+ * Carry out sanity checks on MADT processor entry and initialize
+ * cpu_logical_map on success
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+       u64 hwid = processor->arm_mpidr;
+
+       if (!(processor->flags & ACPI_MADT_ENABLED)) {
+               pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+               return;
+       }
+
+       if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+               pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+               return;
+       }
+
+       if (is_mpidr_duplicate(cpu_count, hwid)) {
+               pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
+               return;
+       }
+
+       /* Check if GICC structure of boot CPU is available in the MADT */
+       if (cpu_logical_map(0) == hwid) {
+               if (bootcpu_valid) {
+                       pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
+                              hwid);
+                       return;
+               }
+               bootcpu_valid = true;
+               return;
+       }
+
+       if (cpu_count >= NR_CPUS)
+               return;
+
+       /* map the logical cpu id to cpu MPIDR */
+       cpu_logical_map(cpu_count) = hwid;
+
+       cpu_count++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+                            const unsigned long end)
+{
+       struct acpi_madt_generic_interrupt *processor;
+
+       processor = (struct acpi_madt_generic_interrupt *)header;
+       if (BAD_MADT_GICC_ENTRY(processor, end))
+               return -EINVAL;
+
+       acpi_table_print_madt_entry(header);
+
+       acpi_map_gic_cpu_interface(processor);
+
+       return 0;
+}
+#else
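+/*
+ * Without ACPI support there is nothing to parse; stub out the single
+ * call site in smp_init_cpus() so it compiles unchanged.
+ */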
+#define acpi_table_parse_madt(...)     do { } while (0)
+#endif
+
 /*
  * Enumerate the possible CPU set from the device tree and build the
  * cpu logical map array containing MPIDR values related to logical
  * cpus. Assumes that cpu_logical_map(0) has already been initialized.
  */
-void __init of_smp_init_cpus(void)
+static void __init of_parse_and_init_cpus(void)
 {
        struct device_node *dn = NULL;
-       unsigned int i, cpu = 1;
-       bool bootcpu_valid = false;
 
        while ((dn = of_find_node_by_type(dn, "cpu"))) {
-               const u32 *cell;
-               u64 hwid;
+               u64 hwid = of_get_cpu_mpidr(dn);
 
-               /*
-                * A cpu node with missing "reg" property is
-                * considered invalid to build a cpu_logical_map
-                * entry.
-                */
-               cell = of_get_property(dn, "reg", NULL);
-               if (!cell) {
-                       pr_err("%s: missing reg property\n", dn->full_name);
+               if (hwid == INVALID_HWID)
                        goto next;
-               }
-               hwid = of_read_number(cell, of_n_addr_cells(dn));
 
-               /*
-                * Non affinity bits must be set to 0 in the DT
-                */
-               if (hwid & ~MPIDR_HWID_BITMASK) {
-                       pr_err("%s: invalid reg property\n", dn->full_name);
+               if (is_mpidr_duplicate(cpu_count, hwid)) {
+                       pr_err("%s: duplicate cpu reg properties in the DT\n",
+                               dn->full_name);
                        goto next;
                }
 
-               /*
-                * Duplicate MPIDRs are a recipe for disaster. Scan
-                * all initialized entries and check for
-                * duplicates. If any is found just ignore the cpu.
-                * cpu_logical_map was initialized to INVALID_HWID to
-                * avoid matching valid MPIDR values.
-                */
-               for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
-                       if (cpu_logical_map(i) == hwid) {
-                               pr_err("%s: duplicate cpu reg properties in the DT\n",
-                                       dn->full_name);
-                               goto next;
-                       }
-               }
-
                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
@@ -392,38 +513,58 @@ void __init of_smp_init_cpus(void)
                        continue;
                }
 
-               if (cpu >= NR_CPUS)
-                       goto next;
-
-               if (cpu_read_ops(dn, cpu) != 0)
-                       goto next;
-
-               if (cpu_ops[cpu]->cpu_init(dn, cpu))
+               if (cpu_count >= NR_CPUS)
                        goto next;
 
                pr_debug("cpu logical map 0x%llx\n", hwid);
-               cpu_logical_map(cpu) = hwid;
+               cpu_logical_map(cpu_count) = hwid;
 next:
-               cpu++;
+               cpu_count++;
        }
+}
 
-       /* sanity check */
-       if (cpu > NR_CPUS)
-               pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-                          cpu, NR_CPUS);
+/*
+ * Enumerate the possible CPU set from the device tree or ACPI and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
+ */
+void __init smp_init_cpus(void)
+{
+       int i;
+
+       if (acpi_disabled)
+               of_parse_and_init_cpus();
+       else
+               /*
+                * do a walk of MADT to determine how many CPUs
+                * we have including disabled CPUs, and get information
+                * we need for SMP init
+                */
+               acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+                                     acpi_parse_gic_cpu_interface, 0);
+
+       if (cpu_count > NR_CPUS)
+               pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
+                       cpu_count, NR_CPUS);
 
        if (!bootcpu_valid) {
-               pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
+               pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }
 
        /*
-        * All the cpus that made it to the cpu_logical_map have been
-        * validated so set them as possible cpus.
+        * We need to set the cpu_logical_map entries before enabling
+        * the cpus so that cpu processor description entries (DT cpu nodes
+        * and ACPI MADT entries) can be retrieved by matching the cpu hwid
+        * with entries in cpu_logical_map while initializing the cpus.
+        * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
-       for (i = 0; i < NR_CPUS; i++)
-               if (cpu_logical_map(i) != INVALID_HWID)
-                       set_cpu_possible(i, true);
+       for (i = 1; i < NR_CPUS; i++) {
+               if (cpu_logical_map(i) != INVALID_HWID) {
+                       if (smp_cpu_setup(i))
+                               cpu_logical_map(i) = INVALID_HWID;
+               }
+       }
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)