These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git] / kernel / arch / arm / mach-rockchip / platsmp.c
index 5b4ca3c..3e7a4b7 100644 (file)
@@ -72,29 +72,22 @@ static struct reset_control *rockchip_get_core_reset(int cpu)
 static int pmu_set_power_domain(int pd, bool on)
 {
        u32 val = (on) ? 0 : BIT(pd);
+       struct reset_control *rstc = rockchip_get_core_reset(pd);
        int ret;
 
+       if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
+               pr_err("%s: could not get reset control for core %d\n",
+                      __func__, pd);
+               return PTR_ERR(rstc);
+       }
+
        /*
         * We need to soft reset the cpu when we turn off the cpu power domain,
         * or else the active processors might be stalled when the individual
         * processor is powered down.
         */
-       if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
-               struct reset_control *rstc = rockchip_get_core_reset(pd);
-
-               if (IS_ERR(rstc)) {
-                       pr_err("%s: could not get reset control for core %d\n",
-                              __func__, pd);
-                       return PTR_ERR(rstc);
-               }
-
-               if (on)
-                       reset_control_deassert(rstc);
-               else
-                       reset_control_assert(rstc);
-
-               reset_control_put(rstc);
-       }
+       if (!IS_ERR(rstc) && !on)
+               reset_control_assert(rstc);
 
        ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
        if (ret < 0) {
@@ -107,11 +100,17 @@ static int pmu_set_power_domain(int pd, bool on)
                ret = pmu_power_domain_is_on(pd);
                if (ret < 0) {
                        pr_err("%s: could not read power domain state\n",
-                                __func__);
+                              __func__);
                        return ret;
                }
        }
 
+       if (!IS_ERR(rstc)) {
+               if (on)
+                       reset_control_deassert(rstc);
+               reset_control_put(rstc);
+       }
+
        return 0;
 }
 
@@ -119,8 +118,7 @@ static int pmu_set_power_domain(int pd, bool on)
  * Handling of CPU cores
  */
 
-static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
-                                            struct task_struct *idle)
+static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
 
@@ -131,7 +129,7 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
 
        if (cpu >= ncores) {
                pr_err("%s: cpu %d outside maximum number of cpus %d\n",
-                                                       __func__, cpu, ncores);
+                      __func__, cpu, ncores);
                return -ENXIO;
        }
 
@@ -141,16 +139,20 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
                return ret;
 
        if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
-               /* We communicate with the bootrom to active the cpus other
+               /*
+                * We communicate with the bootrom to activate the cpus other
                 * than cpu0, after a blob of initialize code, they will
                 * stay at wfe state, once they are actived, they will check
                 * the mailbox:
                 * sram_base_addr + 4: 0xdeadbeaf
                 * sram_base_addr + 8: start address for pc
-                * */
-               udelay(10);
-               writel(virt_to_phys(rockchip_secondary_startup),
-                       sram_base_addr + 8);
+                * Cpu0 needs to wait for the other cpus to enter
+                * the wfe state. The wait time is affected by many factors
+                * (e.g. cpu frequency, bootrom frequency, sram frequency, ...).
+                */
+               mdelay(1); /* give the cpus other than cpu0 time to start up */
+
+               writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
                writel(0xDEADBEAF, sram_base_addr + 4);
                dsb_sev();
        }
@@ -189,7 +191,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
        }
 
        /* set the boot function for the sram code */
-       rockchip_boot_fn = virt_to_phys(rockchip_secondary_startup);
+       rockchip_boot_fn = virt_to_phys(secondary_startup);
 
        /* copy the trampoline to sram, that runs during startup of the core */
        memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
@@ -319,6 +321,13 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
 #ifdef CONFIG_HOTPLUG_CPU
 static int rockchip_cpu_kill(unsigned int cpu)
 {
+       /*
+        * We need a delay here to ensure that the dying CPU can finish
+        * executing v7_coherency_exit() and reach the WFI/WFE state
+        * prior to having the power domain disabled.
+        */
+       mdelay(1);
+
        pmu_set_power_domain(0 + cpu, false);
        return 1;
 }
@@ -326,7 +335,7 @@ static int rockchip_cpu_kill(unsigned int cpu)
 static void rockchip_cpu_die(unsigned int cpu)
 {
        v7_exit_coherency_flush(louis);
-       while(1)
+       while (1)
                cpu_do_idle();
 }
 #endif
@@ -339,4 +348,5 @@ static struct smp_operations rockchip_smp_ops __initdata = {
        .cpu_die                = rockchip_cpu_die,
 #endif
 };
+
 CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops);