These changes are the raw update of the kernel sources to linux-4.4.6-rt14.
[kvmfornfv.git] / kernel / arch / arm64 / kernel / process.c
index c6b1f3b..f75b540 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/personality.h>
 #include <linux/notifier.h>
+#include <trace/events/power.h>
 
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
@@ -58,14 +59,6 @@ unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
-void soft_restart(unsigned long addr)
-{
-       setup_mm_for_reboot();
-       cpu_soft_restart(virt_to_phys(cpu_reset), addr);
-       /* Should never get here */
-       BUG();
-}
-
 /*
  * Function pointers to optional machine specific functions
  */
@@ -83,8 +76,10 @@ void arch_cpu_idle(void)
         * This should do all the clock switching and wait for interrupt
         * tricks
         */
+       trace_cpu_idle_rcuidle(1, smp_processor_id());
        cpu_do_idle();
        local_irq_enable();
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -136,9 +131,7 @@ void machine_power_off(void)
 
 /*
  * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * while the primary CPU resets the system. Systems with multiple CPUs must
  * provide a HW restart implementation, to ensure that all CPUs reset at once.
  * This is required so that any code running after reset on the primary CPU
  * doesn't have to co-ordinate with other CPUs to ensure they aren't still
@@ -243,7 +236,8 @@ void release_thread(struct task_struct *dead_task)
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-       fpsimd_preserve_current_state();
+       if (current->mm)
+               fpsimd_preserve_current_state();
        *dst = *src;
        return 0;
 }
@@ -254,35 +248,35 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p)
 {
        struct pt_regs *childregs = task_pt_regs(p);
-       unsigned long tls = p->thread.tp_value;
 
        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;
-               if (is_compat_thread(task_thread_info(p))) {
-                       if (stack_start)
+
+               /*
+                * Read the current TLS pointer from tpidr_el0 as it may be
+                * out-of-sync with the saved value.
+                */
+               asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));
+
+               if (stack_start) {
+                       if (is_compat_thread(task_thread_info(p)))
                                childregs->compat_sp = stack_start;
-               } else {
-                       /*
-                        * Read the current TLS pointer from tpidr_el0 as it may be
-                        * out-of-sync with the saved value.
-                        */
-                       asm("mrs %0, tpidr_el0" : "=r" (tls));
-                       if (stack_start) {
-                               /* 16-byte aligned stack mandatory on AArch64 */
-                               if (stack_start & 15)
-                                       return -EINVAL;
+                       /* 16-byte aligned stack mandatory on AArch64 */
+                       else if (stack_start & 15)
+                               return -EINVAL;
+                       else
                                childregs->sp = stack_start;
-                       }
                }
+
                /*
                 * If a TLS pointer was passed to clone (4th argument), use it
                 * for the new thread.
                 */
                if (clone_flags & CLONE_SETTLS)
-                       tls = childregs->regs[3];
+                       p->thread.tp_value = childregs->regs[3];
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
@@ -291,7 +285,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;
-       p->thread.tp_value = tls;
 
        ptrace_hw_copy_thread(p);
 
@@ -302,18 +295,12 @@ static void tls_thread_switch(struct task_struct *next)
 {
        unsigned long tpidr, tpidrro;
 
-       if (!is_compat_task()) {
-               asm("mrs %0, tpidr_el0" : "=r" (tpidr));
-               current->thread.tp_value = tpidr;
-       }
+       asm("mrs %0, tpidr_el0" : "=r" (tpidr));
+       *task_user_tls(current) = tpidr;
 
-       if (is_compat_thread(task_thread_info(next))) {
-               tpidr = 0;
-               tpidrro = next->thread.tp_value;
-       } else {
-               tpidr = next->thread.tp_value;
-               tpidrro = 0;
-       }
+       tpidr = *task_user_tls(next);
+       tpidrro = is_compat_thread(task_thread_info(next)) ?
+                 next->thread.tp_value : 0;
 
        asm(
        "       msr     tpidr_el0, %0\n"