These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/arch/x86/kernel/process_32.c
index 3a70713..4dd4bea 100644
@@ -40,8 +40,7 @@
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/desc.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
@@ -55,6 +54,7 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
+#include <asm/vm86.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
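
The include changes above track two upstream cleanups: the x86 FPU headers were reorganized into asm/fpu/ during the 4.2 cycle, and the vm86 declarations this file uses (e.g. release_vm86_irqs()) now come from asm/vm86.h rather than indirectly through asm/processor.h. A rough mapping of the FPU header renames, from memory and hedged accordingly:

    /* Old header              New header (4.2 FPU reorg; approximate)
     * <asm/i387.h>         -> <asm/fpu/api.h>
     * <asm/fpu-internal.h> -> <asm/fpu/internal.h>
     * <asm/xsave.h>        -> <asm/fpu/xstate.h>
     */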
@@ -130,8 +130,8 @@ void release_thread(struct task_struct *dead_task)
        release_vm86_irqs(dead_task);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp,
-       unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+       unsigned long arg, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
        struct task_struct *tsk;
@@ -186,7 +186,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
-                       (struct user_desc __user *)childregs->si, 0);
+                       (struct user_desc __user *)tls, 0);
 
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
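
The two hunks above track the upstream copy_thread_tls() conversion: the new thread's TLS pointer now arrives as an explicit C argument instead of being fished out of the child's pt_regs (childregs->si), which was fragile for kernel threads and for entry paths that do not use the 32-bit clone() register layout. A sketch, close to but not necessarily the verbatim include/linux/sched.h of that era, of how unconverted architectures were bridged:

    #ifndef CONFIG_HAVE_COPY_THREAD_TLS
    /*
     * Architectures not yet converted still dig the TLS pointer out of
     * the child's registers themselves, so the generic fork path just
     * drops the explicit argument and calls the old entry point.
     */
    static inline int copy_thread_tls(unsigned long clone_flags,
    				  unsigned long sp, unsigned long arg,
    				  struct task_struct *p, unsigned long tls)
    {
    	return copy_thread(clone_flags, sp, arg, p);
    }
    #endif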
@@ -272,14 +272,16 @@ __visible __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread,
-                                *next = &next_p->thread;
+                            *next = &next_p->thread;
+       struct fpu *prev_fpu = &prev->fpu;
+       struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-       fpu_switch_t fpu;
+       fpu_switch_t fpu_switch;
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-       fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+       fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
 
        /*
         * Save away %gs. No need to save %fs, as it was saved on the
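
The renamed locals follow the 4.2 FPU rework: the lazy-switch helpers now take pointers to the per-task struct fpu rather than to task_struct. That state is embedded at the tail of thread_struct, roughly as below (a sketch, not the verbatim header):

    struct thread_struct {
    	/* ... segment, stack, debug and I/O-bitmap state ... */

    	/*
    	 * WARNING: 'fpu' is dynamically sized (its xsave area depends
    	 * on CPU features), so it must remain the last member.
    	 */
    	struct fpu	fpu;
    };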
@@ -307,14 +309,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);
 
-       /*
-        * If it were not for PREEMPT_ACTIVE we could guarantee that the
-        * preempt_count of all tasks was equal here and this would not be
-        * needed.
-        */
-       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
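
The deleted save/restore of saved_preempt_count became unnecessary once the scheduler guaranteed a fixed preempt_count at every context switch (upstream "sched/core: Create preempt_count invariant"). The invariant, sketched as a comment:

    /*
     * Every task now switches out with the same preempt_count:
     *
     *	schedule()
     *	  preempt_disable();			// count == 1
     *	  __schedule()
     *	    raw_spin_lock_irq(&rq->lock);	// count == 2
     *	    context_switch()			// all tasks park here
     *
     * so there is no longer a per-task count to save or restore.
     */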
@@ -328,19 +322,16 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
-        * done before math_state_restore, so the TS bit is up
+        * done before fpu__restore(), so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);
 
        /*
-        * Reload esp0, kernel_stack, and current_top_of_stack.  This changes
+        * Reload esp0 and cpu_current_top_of_stack.  This changes
         * current_thread_info().
         */
        load_sp0(tss, next);
-       this_cpu_write(kernel_stack,
-                      (unsigned long)task_stack_page(next_p) +
-                      THREAD_SIZE);
        this_cpu_write(cpu_current_top_of_stack,
                       (unsigned long)task_stack_page(next_p) +
                       THREAD_SIZE);
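
Dropping the kernel_stack write is safe because nothing reads that per-CPU variable any more: since the 4.1/4.2 stack-top rework, current_thread_info() on 32-bit is derived from cpu_current_top_of_stack, which this function still keeps up to date. A hedged condensation of the asm/thread_info.h logic:

    static inline struct thread_info *current_thread_info(void)
    {
    	/* cpu_current_top_of_stack is the per-CPU copy updated above */
    	unsigned long top = this_cpu_read_stable(cpu_current_top_of_stack);

    	return (struct thread_info *)(top - THREAD_SIZE);
    }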
@@ -351,37 +342,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (prev->gs | next->gs)
                lazy_load_gs(next->gs);
 
-       switch_fpu_finish(next_p, fpu);
+       switch_fpu_finish(next_fpu, fpu_switch);
 
        this_cpu_write(current_task, next_p);
 
        return prev_p;
 }
-
-#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long bp, sp, ip;
-       unsigned long stack_page;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack_page = (unsigned long)task_stack_page(p);
-       sp = p->thread.sp;
-       if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
-               return 0;
-       /* include/asm-i386/system.h:switch_to() pushes bp last. */
-       bp = *(unsigned long *) sp;
-       do {
-               if (bp < stack_page || bp > top_ebp+stack_page)
-                       return 0;
-               ip = *(unsigned long *) (bp+4);
-               if (!in_sched_functions(ip))
-                       return ip;
-               bp = *(unsigned long *) bp;
-       } while (count++ < 16);
-       return 0;
-}
-
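
get_wchan() is not lost here: around this release, upstream unified the 32-bit and 64-bit walkers into a single implementation in arch/x86/kernel/process.c, so the open-coded frame chase is removed from this file. For reference, the stack layout the removed code relied on (sketch):

    /*
     * switch_to() pushes %ebp last, so for a sleeping task:
     *
     *	p->thread.sp -> saved %ebp ----.
     *	                               v
     *	        frame at bp: [ caller %ebp ][ return %eip ]
     *
     * get_wchan() follows the %ebp chain until the return address
     * falls outside the scheduler (!in_sched_functions(ip)), bounding
     * the walk at 16 frames.
     */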