Add the RT Linux 4.1.3-rt3 kernel as base
diff --git a/kernel/arch/x86/kernel/process_64.c b/kernel/arch/x86/kernel/process_64.c
new file mode 100644 (file)
index 0000000..ddfdbf7
--- /dev/null
@@ -0,0 +1,629 @@
+/*
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ *  X86-64 port
+ *     Andi Kleen.
+ *
+ *     CPU hotplug support - ashok.raj@intel.com
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling.
+ */
+
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elfcore.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/prctl.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/ftrace.h>
+
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
+#include <asm/mmu_context.h>
+#include <asm/prctl.h>
+#include <asm/desc.h>
+#include <asm/proto.h>
+#include <asm/ia32.h>
+#include <asm/idle.h>
+#include <asm/syscalls.h>
+#include <asm/debugreg.h>
+#include <asm/switch_to.h>
+
+asmlinkage extern void ret_from_fork(void);
+
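+/*
+ * Per-CPU scratch slot: the 64-bit SYSCALL entry code stashes the user
+ * stack pointer here while it switches to the kernel stack.
+ */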
+__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
+
+/* Also prints some state that isn't saved in pt_regs */
+void __show_regs(struct pt_regs *regs, int all)
+{
+       unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
+       unsigned long d0, d1, d2, d3, d6, d7;
+       unsigned int fsindex, gsindex;
+       unsigned int ds, cs, es;
+
+       printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+       printk_address(regs->ip);
+       printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
+                       regs->sp, regs->flags);
+       printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
+              regs->ax, regs->bx, regs->cx);
+       printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
+              regs->dx, regs->si, regs->di);
+       printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
+              regs->bp, regs->r8, regs->r9);
+       printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
+              regs->r10, regs->r11, regs->r12);
+       printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
+              regs->r13, regs->r14, regs->r15);
+
+       asm("movl %%ds,%0" : "=r" (ds));
+       asm("movl %%cs,%0" : "=r" (cs));
+       asm("movl %%es,%0" : "=r" (es));
+       asm("movl %%fs,%0" : "=r" (fsindex));
+       asm("movl %%gs,%0" : "=r" (gsindex));
+
+       rdmsrl(MSR_FS_BASE, fs);
+       rdmsrl(MSR_GS_BASE, gs);
+       rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+
+       if (!all)
+               return;
+
+       cr0 = read_cr0();
+       cr2 = read_cr2();
+       cr3 = read_cr3();
+       cr4 = __read_cr4();
+
+       printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+              fs, fsindex, gs, gsindex, shadowgs);
+       printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+                       es, cr0);
+       printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
+                       cr4);
+
+       get_debugreg(d0, 0);
+       get_debugreg(d1, 1);
+       get_debugreg(d2, 2);
+       get_debugreg(d3, 3);
+       get_debugreg(d6, 6);
+       get_debugreg(d7, 7);
+
+       /* Only print out debug registers if they are in their non-default state. */
+       if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
+           (d6 == DR6_RESERVED) && (d7 == 0x400))
+               return;
+
+       printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
+       printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+       if (dead_task->mm) {
+               if (dead_task->mm->context.size) {
+                       pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
+                               dead_task->comm,
+                               dead_task->mm->context.ldt,
+                               dead_task->mm->context.size);
+                       BUG();
+               }
+       }
+}
+
+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
+{
+       struct user_desc ud = {
+               .base_addr = addr,
+               .limit = 0xfffff,
+               .seg_32bit = 1,
+               .limit_in_pages = 1,
+               .useable = 1,
+       };
+       struct desc_struct *desc = t->thread.tls_array;
+       desc += tls;
+       fill_ldt(desc, &ud);
+}
+
+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
+{
+       return get_desc_base(&t->thread.tls_array[tls]);
+}
+
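+/*
+ * copy_thread - set up the saved registers and thread state for a new
+ * task at fork()/clone() time.  Kernel threads get a synthesized
+ * pt_regs that makes ret_from_fork call the requested function; user
+ * threads start from a copy of the parent's registers.
+ */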
+int copy_thread(unsigned long clone_flags, unsigned long sp,
+               unsigned long arg, struct task_struct *p)
+{
+       int err;
+       struct pt_regs *childregs;
+       struct task_struct *me = current;
+
+       p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+       childregs = task_pt_regs(p);
+       p->thread.sp = (unsigned long) childregs;
+       set_tsk_thread_flag(p, TIF_FORK);
+       p->thread.io_bitmap_ptr = NULL;
+
+       savesegment(gs, p->thread.gsindex);
+       p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
+       savesegment(fs, p->thread.fsindex);
+       p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
+       savesegment(es, p->thread.es);
+       savesegment(ds, p->thread.ds);
+       memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+
+       if (unlikely(p->flags & PF_KTHREAD)) {
+               /* kernel thread */
+               memset(childregs, 0, sizeof(struct pt_regs));
+               childregs->sp = (unsigned long)childregs;
+               childregs->ss = __KERNEL_DS;
+               childregs->bx = sp; /* function */
+               childregs->bp = arg;
+               childregs->orig_ax = -1;
+               childregs->cs = __KERNEL_CS | get_kernel_rpl();
+               childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
+               return 0;
+       }
+       *childregs = *current_pt_regs();
+
+       childregs->ax = 0;
+       if (sp)
+               childregs->sp = sp;
+
+       err = -ENOMEM;
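+       /*
+        * If the parent tracks an I/O permission bitmap (set up via
+        * ioperm(2)), give the child its own copy.
+        */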
+       if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
+               p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
+                                                 IO_BITMAP_BYTES, GFP_KERNEL);
+               if (!p->thread.io_bitmap_ptr) {
+                       p->thread.io_bitmap_max = 0;
+                       return -ENOMEM;
+               }
+               set_tsk_thread_flag(p, TIF_IO_BITMAP);
+       }
+
+       /*
+        * Set a new TLS for the child thread?
+        */
+       if (clone_flags & CLONE_SETTLS) {
+#ifdef CONFIG_IA32_EMULATION
+               if (is_ia32_task())
+                       err = do_set_thread_area(p, -1,
+                               (struct user_desc __user *)childregs->si, 0);
+               else
+#endif
+                       err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+               if (err)
+                       goto out;
+       }
+       err = 0;
+out:
+       if (err && p->thread.io_bitmap_ptr) {
+               kfree(p->thread.io_bitmap_ptr);
+               p->thread.io_bitmap_max = 0;
+       }
+
+       return err;
+}
+
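+/*
+ * Reset segment registers and the saved user register state so that the
+ * task enters user mode at new_ip with stack new_sp; called at exec
+ * time via start_thread() and start_thread_ia32().
+ */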
+static void
+start_thread_common(struct pt_regs *regs, unsigned long new_ip,
+                   unsigned long new_sp,
+                   unsigned int _cs, unsigned int _ss, unsigned int _ds)
+{
+       loadsegment(fs, 0);
+       loadsegment(es, _ds);
+       loadsegment(ds, _ds);
+       load_gs_index(0);
+       regs->ip                = new_ip;
+       regs->sp                = new_sp;
+       regs->cs                = _cs;
+       regs->ss                = _ss;
+       regs->flags             = X86_EFLAGS_IF;
+       force_iret();
+}
+
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+       start_thread_common(regs, new_ip, new_sp,
+                           __USER_CS, __USER_DS, 0);
+}
+
+#ifdef CONFIG_IA32_EMULATION
+void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
+{
+       start_thread_common(regs, new_ip, new_sp,
+                           test_thread_flag(TIF_X32)
+                           ? __USER_CS : __USER32_CS,
+                           __USER_DS, __USER_DS);
+}
+#endif
+
+/*
+ *     switch_to(x,y) should switch tasks from x to y.
+ *
+ * This could still be optimized:
+ * - fold all the options into a flag word and test it with a single test.
+ * - could test fs/gs bitsliced
+ *
+ * Kprobes not supported here. Set the probe on schedule instead.
+ * Function graph tracer is not supported either.
+ */
+__visible __notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+       struct thread_struct *prev = &prev_p->thread;
+       struct thread_struct *next = &next_p->thread;
+       int cpu = smp_processor_id();
+       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       unsigned fsindex, gsindex;
+       fpu_switch_t fpu;
+
+       fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+
+       /* We must save %fs and %gs before load_TLS() because
+        * %fs and %gs may be cleared by load_TLS().
+        *
+        * (e.g. xen_load_tls())
+        */
+       savesegment(fs, fsindex);
+       savesegment(gs, gsindex);
+
+       /*
+        * Load TLS before restoring any segments so that segment loads
+        * reference the correct GDT entries.
+        */
+       load_TLS(next, cpu);
+
+       /*
+        * Leave lazy mode, flushing any hypercalls made here.  This
+        * must be done after loading TLS entries in the GDT but before
+        * loading segments that might reference them, and it must
+        * be done before math_state_restore, so the TS bit is up to
+        * date.
+        */
+       arch_end_context_switch(next_p);
+
+       /* Switch DS and ES.
+        *
+        * Reading them only returns the selectors, but writing them (if
+        * nonzero) loads the full descriptor from the GDT or LDT.  The
+        * LDT for next is loaded in switch_mm, and the GDT is loaded
+        * above.
+        *
+        * We therefore need to write new values to the segment
+        * registers on every context switch unless both the new and old
+        * values are zero.
+        *
+        * Note that we don't need to do anything for CS and SS, as
+        * those are saved and restored as part of pt_regs.
+        */
+       savesegment(es, prev->es);
+       if (unlikely(next->es | prev->es))
+               loadsegment(es, next->es);
+
+       savesegment(ds, prev->ds);
+       if (unlikely(next->ds | prev->ds))
+               loadsegment(ds, next->ds);
+
+       /*
+        * Switch FS and GS.
+        *
+        * These are even more complicated than DS and ES: they have
+        * 64-bit bases that are controlled by arch_prctl.  Those bases
+        * only differ from the values in the GDT or LDT if the selector
+        * is 0.
+        *
+        * Loading the segment register resets the hidden base part of
+        * the register to 0 or the value from the GDT / LDT.  If the
+        * next base address is zero, writing 0 to the segment register is
+        * much faster than using wrmsr to explicitly zero the base.
+        *
+        * The thread_struct.fs and thread_struct.gs values are 0
+        * if the fs and gs bases respectively are not overridden
+        * from the values implied by fsindex and gsindex.  They
+        * are nonzero, and store the nonzero base addresses, if
+        * the bases are overridden.
+        *
+        * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
+        * be impossible.
+        *
+        * Therefore we need to reload the segment registers if either
+        * the old or new selector is nonzero, and we need to override
+        * the base address if next thread expects it to be overridden.
+        *
+        * This code is unnecessarily slow in the case where the old and
+        * new indexes are zero and the new base is nonzero -- it will
+        * unnecessarily write 0 to the selector before writing the new
+        * base address.
+        *
+        * Note: This all depends on arch_prctl being the only way that
+        * user code can override the segment base.  Once wrfsbase and
+        * wrgsbase are enabled, most of this code will need to change.
+        */
+       if (unlikely(fsindex | next->fsindex | prev->fs)) {
+               loadsegment(fs, next->fsindex);
+
+               /*
+                * If user code wrote a nonzero value to FS, then it also
+                * cleared the overridden base address.
+                *
+                * XXX: if user code wrote 0 to FS and cleared the base
+                * address itself, we won't notice and we'll incorrectly
+                * restore the prior base address next time we reschedule
+                * the process.
+                */
+               if (fsindex)
+                       prev->fs = 0;
+       }
+       if (next->fs)
+               wrmsrl(MSR_FS_BASE, next->fs);
+       prev->fsindex = fsindex;
+
+       if (unlikely(gsindex | next->gsindex | prev->gs)) {
+               load_gs_index(next->gsindex);
+
+               /* This works (and fails) the same way as fsindex above. */
+               if (gsindex)
+                       prev->gs = 0;
+       }
+       if (next->gs)
+               wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+       prev->gsindex = gsindex;
+
+       switch_fpu_finish(next_p, fpu);
+
+       /*
+        * Switch the per-CPU current task pointer (historically the
+        * "PDA"); the FPU context was handled above.
+        */
+       this_cpu_write(current_task, next_p);
+
+       /*
+        * If it were not for PREEMPT_ACTIVE we could guarantee that the
+        * preempt_count of all tasks was equal here and this would not be
+        * needed.
+        */
+       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
+       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
+
+       /* Reload esp0 and ss1.  This changes current_thread_info(). */
+       load_sp0(tss, next);
+
+       this_cpu_write(kernel_stack,
+               (unsigned long)task_stack_page(next_p) + THREAD_SIZE);
+
+       /*
+        * Now maybe reload the debug registers and handle I/O bitmaps
+        */
+       if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
+                    task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+               __switch_to_xtra(prev_p, next_p, tss);
+
+       if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
+               /*
+                * AMD CPUs have a misfeature: SYSRET sets the SS selector but
+                * does not update the cached descriptor.  As a result, if we
+                * do SYSRET while SS is NULL, we'll end up in user mode with
+                * SS apparently equal to __USER_DS but actually unusable.
+                *
+                * The straightforward workaround would be to fix it up just
+                * before SYSRET, but that would slow down the system call
+                * fast paths.  Instead, we ensure that SS is never NULL in
+                * system call context.  We do this by replacing NULL SS
+                * selectors at every context switch.  SYSCALL sets up a valid
+                * SS, so the only way to get NULL is to re-enter the kernel
+                * from CPL 3 through an interrupt.  Since that can't happen
+                * in the same task as a running syscall, we are guaranteed to
+                * context switch between every interrupt vector entry and a
+                * subsequent SYSRET.
+                *
+                * We read SS first because SS reads are much faster than
+                * writes.  Out of caution, we force SS to __KERNEL_DS even if
+                * it previously had a different non-NULL value.
+                */
+               unsigned short ss_sel;
+               savesegment(ss, ss_sel);
+               if (ss_sel != __KERNEL_DS)
+                       loadsegment(ss, __KERNEL_DS);
+       }
+
+       return prev_p;
+}
+
+void set_personality_64bit(void)
+{
+       /* inherit personality from parent */
+
+       /* Make sure to be in 64bit mode */
+       clear_thread_flag(TIF_IA32);
+       clear_thread_flag(TIF_ADDR32);
+       clear_thread_flag(TIF_X32);
+
+       /* Ensure the corresponding mm is not marked. */
+       if (current->mm)
+               current->mm->context.ia32_compat = 0;
+
+       /* TBD: overwrites user setup. Should have two bits.
+          But 64-bit processes have always behaved this way,
+          so it's not too bad. The main problem is just that
+          32-bit children are affected again. */
+       current->personality &= ~READ_IMPLIES_EXEC;
+}
+
+void set_personality_ia32(bool x32)
+{
+       /* inherit personality from parent */
+
+       /* Make sure to be in 32bit mode */
+       set_thread_flag(TIF_ADDR32);
+
+       /* Mark the associated mm as containing 32-bit tasks. */
+       if (x32) {
+               clear_thread_flag(TIF_IA32);
+               set_thread_flag(TIF_X32);
+               if (current->mm)
+                       current->mm->context.ia32_compat = TIF_X32;
+               current->personality &= ~READ_IMPLIES_EXEC;
+               /* is_compat_task() uses the presence of the x32
+                  syscall bit flag to determine compat status */
+               current_thread_info()->status &= ~TS_COMPAT;
+       } else {
+               set_thread_flag(TIF_IA32);
+               clear_thread_flag(TIF_X32);
+               if (current->mm)
+                       current->mm->context.ia32_compat = TIF_IA32;
+               current->personality |= force_personality32;
+               /* Prepare the first "return" to user space */
+               current_thread_info()->status |= TS_COMPAT;
+       }
+}
+EXPORT_SYMBOL_GPL(set_personality_ia32);
+
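+/*
+ * get_wchan - find the "wait channel" of a sleeping task: walk its
+ * frame-pointer chain (bounded to 16 frames) and return the first
+ * return address outside the scheduler, e.g. for /proc/<pid>/wchan.
+ */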
+unsigned long get_wchan(struct task_struct *p)
+{
+       unsigned long stack;
+       u64 fp, ip;
+       int count = 0;
+
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
+       stack = (unsigned long)task_stack_page(p);
+       if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+               return 0;
+       fp = *(u64 *)(p->thread.sp);
+       do {
+               if (fp < (unsigned long)stack ||
+                   fp >= (unsigned long)stack+THREAD_SIZE)
+                       return 0;
+               ip = *(u64 *)(fp+8);
+               if (!in_sched_functions(ip))
+                       return ip;
+               fp = *(u64 *)fp;
+       } while (count++ < 16);
+       return 0;
+}
+
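+/*
+ * do_arch_prctl - get or set the FS/GS base of a task.  Bases that fit
+ * in 32 bits are installed as GDT TLS entries because those are faster
+ * to switch; larger bases go through the FS/GS base MSRs.
+ *
+ * A minimal userspace sketch (illustrative only; "tls_block" is a
+ * hypothetical pointer to thread-local storage):
+ *
+ *	#include <asm/prctl.h>
+ *	#include <sys/syscall.h>
+ *	#include <unistd.h>
+ *
+ *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
+ */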
+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+{
+       int ret = 0;
+       int doit = task == current;
+       int cpu;
+
+       switch (code) {
+       case ARCH_SET_GS:
+               if (addr >= TASK_SIZE_OF(task))
+                       return -EPERM;
+               cpu = get_cpu();
+               /* handle small bases via the GDT because that's faster to
+                  switch. */
+               if (addr <= 0xffffffff) {
+                       set_32bit_tls(task, GS_TLS, addr);
+                       if (doit) {
+                               load_TLS(&task->thread, cpu);
+                               load_gs_index(GS_TLS_SEL);
+                       }
+                       task->thread.gsindex = GS_TLS_SEL;
+                       task->thread.gs = 0;
+               } else {
+                       task->thread.gsindex = 0;
+                       task->thread.gs = addr;
+                       if (doit) {
+                               load_gs_index(0);
+                               ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
+                       }
+               }
+               put_cpu();
+               break;
+       case ARCH_SET_FS:
+               /* Not strictly needed for fs, but do it for symmetry
+                  with gs */
+               if (addr >= TASK_SIZE_OF(task))
+                       return -EPERM;
+               cpu = get_cpu();
+               /* handle small bases via the GDT because that's faster to
+                  switch. */
+               if (addr <= 0xffffffff) {
+                       set_32bit_tls(task, FS_TLS, addr);
+                       if (doit) {
+                               load_TLS(&task->thread, cpu);
+                               loadsegment(fs, FS_TLS_SEL);
+                       }
+                       task->thread.fsindex = FS_TLS_SEL;
+                       task->thread.fs = 0;
+               } else {
+                       task->thread.fsindex = 0;
+                       task->thread.fs = addr;
+                       if (doit) {
+                               /* set the selector to 0 to not confuse
+                                  __switch_to */
+                               loadsegment(fs, 0);
+                               ret = wrmsrl_safe(MSR_FS_BASE, addr);
+                       }
+               }
+               put_cpu();
+               break;
+       case ARCH_GET_FS: {
+               unsigned long base;
+               if (task->thread.fsindex == FS_TLS_SEL)
+                       base = read_32bit_tls(task, FS_TLS);
+               else if (doit)
+                       rdmsrl(MSR_FS_BASE, base);
+               else
+                       base = task->thread.fs;
+               ret = put_user(base, (unsigned long __user *)addr);
+               break;
+       }
+       case ARCH_GET_GS: {
+               unsigned long base;
+               unsigned gsindex;
+               if (task->thread.gsindex == GS_TLS_SEL)
+                       base = read_32bit_tls(task, GS_TLS);
+               else if (doit) {
+                       savesegment(gs, gsindex);
+                       if (gsindex)
+                               rdmsrl(MSR_KERNEL_GS_BASE, base);
+                       else
+                               base = task->thread.gs;
+               } else
+                       base = task->thread.gs;
+               ret = put_user(base, (unsigned long __user *)addr);
+               break;
+       }
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+long sys_arch_prctl(int code, unsigned long addr)
+{
+       return do_arch_prctl(current, code, addr);
+}
+
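+/*
+ * KSTK_ESP - report the task's saved user stack pointer (used e.g. by
+ * /proc/<pid>/stat).
+ */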
+unsigned long KSTK_ESP(struct task_struct *task)
+{
+       return task_pt_regs(task)->sp;
+}