/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/mpx.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
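
/*
 * The conditional_sti()/conditional_cli() helpers below (and their _ist
 * variants) re-enable or re-disable interrupts only if the interrupted
 * context had X86_EFLAGS_IF set, so a trap taken with interrupts off
 * never turns them on behind the interrupted code's back.
 */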
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_sti_ist(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
	/*
	 * X86_64 uses a per CPU stack on the IST for certain traps
	 * like int3. The task can not be preempted when using one
	 * of these stacks, thus preemption must be disabled, otherwise
	 * the stack can be corrupted if the task is scheduled out,
	 * and another task comes in and uses this stack.
	 *
	 * On x86_32 the task keeps its own stack and it is OK if the
	 * task schedules out.
	 */
	preempt_count_inc();
#endif
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void conditional_cli_ist(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
#ifdef CONFIG_X86_64
	preempt_count_dec();
#endif
}

enum ctx_state ist_enter(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	if (user_mode(regs)) {
		/* Other than that, we're just an exception. */
		prev_state = exception_enter();
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
		prev_state = CONTEXT_KERNEL;  /* the value is irrelevant. */
	}

	/*
	 * We are atomic because we're on the IST stack (or we're on x86_32,
	 * in which case we still shouldn't schedule).
	 *
	 * This must be after exception_enter(), because exception_enter()
	 * won't do anything if in_interrupt() returns true.
	 */
	preempt_count_add(HARDIRQ_OFFSET);

	/* This code is a bit fragile.  Test it. */
	rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");

	return prev_state;
}

void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
{
	/* Must be before exception_exit. */
	preempt_count_sub(HARDIRQ_OFFSET);

	if (user_mode(regs))
		return exception_exit(prev_state);
	else
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
			       current_stack_pointer()) >= THREAD_SIZE);

	preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_count_add(HARDIRQ_OFFSET);
}
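
/*
 * Illustrative call sequence (a sketch, not taken from this file) for an
 * IST handler that wants to schedule after interrupting user mode:
 *
 *	prev_state = ist_enter(regs);
 *	...
 *	ist_begin_non_atomic(regs);
 *	local_irq_enable();
 *	...			(may sleep or schedule here)
 *	local_irq_disable();
 *	ist_end_non_atomic();
 *	ist_exit(regs, prev_state);
 */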

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}
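
/*
 * For example, a divide error (X86_TRAP_DE) reaches userspace as SIGFPE
 * with si_code == FPE_INTDIV and si_addr pointing at the faulting
 * instruction, while trap numbers with no case above fall back to
 * SEND_SIG_PRIV and carry no extra siginfo details.
 */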

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		conditional_sti(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}

	exception_exit(prev_state);
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
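
/*
 * For reference, the first invocation above expands to:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs,
 *					   long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error",
 *			      X86_TRAP_DE, SIGFPE);
 *	}
 */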

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
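		/*
		 * The 5*8 bytes copied below are the hardware iret frame
		 * the failed IRET was consuming: RIP, CS, RFLAGS, RSP, SS.
		 */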
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);  /* Discard prev_state because we won't return. */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	struct xsave_struct *xsave_buf;
	enum ctx_state prev_state;
	struct bndcsr *bndcsr;
	siginfo_t *info;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		goto exit;
	conditional_sti(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * It is not directly accessible, though, so we need to
	 * do an xsave and then pull it out of the xsave buffer.
	 */
	fpu_save_init(&tsk->thread.fpu);
	xsave_buf = &(tsk->thread.fpu.state->xsave);
	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault(xsave_buf))
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs, xsave_buf);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

exit:
	exception_exit(prev_state);
	return;
exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
	exception_exit(prev_state);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	conditional_sti_ist(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	conditional_cli_ist(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = ist_enter(regs);

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;
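
	/*
	 * (icebp is the undocumented 0xf1 opcode; it raises #DB without
	 * setting any cause bit in DR6, which is why an all-zero DR6 in
	 * user mode is treated as one.)
	 */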

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	conditional_sti_ist(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		conditional_cli_ist(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	conditional_cli_ist(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}
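
	/*
	 * Illustrative x87 example: with the default control word 0x037f
	 * changed to unmask divide-by-zero (cwd = 0x037b) and a status
	 * word with the ZE flag set (swd = 0x0004), err = swd & ~cwd =
	 * 0x0004, which the chain below maps to FPE_FLTDIV.
	 */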
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
		fpu_reset_state(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu_counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack.  Using the original stack works well enough at this
	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);
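
	/* On EISA machines the BIOS stores the ASCII signature "EISA" here. */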
	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);
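
	/*
	 * (FIRST_EXTERNAL_VECTOR is 0x20: vectors 0-31 are architecturally
	 * reserved for CPU exceptions and must never be handed out for
	 * external interrupts.)
	 */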

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init() */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, IST works only after
	 * cpu_init() loads TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
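	/* Each 64-bit IDT gate descriptor is 16 bytes, hence the "* 16". */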
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}