/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}
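
/*
 * Dump the register state for oopses and sysrq: general-purpose and
 * segment registers always, control registers and (when not in their
 * reset state) debug registers only when @all is set.
 */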
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		gs = get_user_gs(regs);
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4_safe();
	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);
	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}
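
/*
 * Set up the child's thread state and kernel stack for fork/clone/
 * kernel_thread.  A kernel thread gets a zeroed pt_regs and starts at
 * ret_from_kernel_thread with the function in %ebx and its argument in
 * %ebp; a user child gets a copy of the parent's registers with %eax
 * forced to 0 (the child's fork() return value), plus its own copy of
 * the parent's I/O bitmap and, for CLONE_SETTLS, its own TLS descriptor.
 */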
int copy_thread(unsigned long clone_flags, unsigned long sp,
	unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct task_struct *tsk;
	int err;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs + 1);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.ip = (unsigned long) ret_from_kernel_thread;
		task_user_gs(p) = __KERNEL_STACK_CANARY;
		childregs->ds = __USER_DS;
		childregs->es = __USER_DS;
		childregs->fs = __KERNEL_PERCPU;
		childregs->bx = sp;	/* function */
		childregs->bp = arg;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		p->thread.io_bitmap_ptr = NULL;
		return 0;
	}

	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	p->thread.ip = (unsigned long) ret_from_fork;
	task_user_gs(p) = get_user_gs(current_pt_regs());

	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	/* Forget the I/O bitmap if TLS setup failed. */
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
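
/*
 * Set up the user-mode register state for a freshly exec'ed task: flat
 * user code and data segments, the new entry point and stack pointer,
 * and interrupts enabled.
 */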
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs	= 0;
	regs->ds	= __USER_DS;
	regs->es	= __USER_DS;
	regs->ss	= __USER_DS;
	regs->cs	= __USER_CS;
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}
EXPORT_SYMBOL_GPL(start_thread);
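
/*
 * With PREEMPT_RT_FULL, kmap_atomic() no longer disables preemption, so a
 * task may be switched out while it still owns atomic kmap slots.  The
 * PTEs are stashed per task (kmap_pte[]/kmap_idx) and have to be torn
 * down for @prev_p and re-installed for @next_p on every context switch.
 */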
#ifdef CONFIG_PREEMPT_RT_FULL
static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
{
	int i;

	/* Clear @prev's kmap_atomic mappings */
	for (i = 0; i < prev_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();
		pte_t *ptep = kmap_pte - idx;

		kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
	}
	/* Restore @next_p's kmap_atomic mappings */
	for (i = 0; i < next_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		if (!pte_none(next_p->kmap_pte[i]))
			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
	}
}
#else
static inline void
switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	fpu_switch_t fpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
	 * preempt_count of all tasks was equal here and this would not be
	 * needed.
	 */
	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	switch_kmaps(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Reload esp0, kernel_stack, and current_top_of_stack. This changes
	 * current_thread_info().
	 */
	load_sp0(tss, next);
	this_cpu_write(kernel_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);
	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	switch_fpu_finish(next_p, fpu);

	this_cpu_write(current_task, next_p);

	return prev_p;
}

#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))
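
/*
 * get_wchan() reports where a blocked task is sleeping: it walks the
 * saved frame-pointer chain on the task's kernel stack (at most 16
 * frames) and returns the first return address that is not inside the
 * scheduler, or 0 if nothing plausible is found.
 */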
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}