/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka "iret")
 *	GET_CR0_INTO_EAX (aka "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
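# Rough sketch of the two expansions (illustrative only, not the real
# <asm/paravirt.h> definitions): on native hardware the macros collapse
# to single instructions, while a paravirt kernel routes them through
# the pv_*_ops tables so each call site can be patched:
#
#	native:				paravirt (assumed shape):
#	INTERRUPT_RETURN -> iret	indirect jump via pv_cpu_ops.iret
#	DISABLE_INTERRUPTS -> cli	call pv_irq_ops.irq_disable,
#					clobbering only the CLBR_* set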
#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel only uses it for the
 * stack canary, which gcc requires to be at %gs:20. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
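# A minimal sketch of why %gs:20 matters (assumed gcc output, not code
# from this file): with -fstack-protector, gcc brackets a protected
# function roughly like
#
#	movl	%gs:20, %eax		# load canary
#	movl	%eax, -4(%ebp)		# stash it in the frame
#	...
#	movl	-4(%ebp), %eax
#	xorl	%gs:20, %eax		# must still match on exit
#	jne	__stack_chk_fail
#
# so a valid canary must live at %gs:20 whenever such code can run,
# which is what the SET_KERNEL_GS machinery below arranges.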
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-ops */
	addl	$(4 + \pop), %esp

 /* all the rest are no-ops */
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else /* CONFIG_X86_32_LAZY_GS */

	.pushsection .fixup, "ax"
	_ASM_EXTABLE(98b, 99b)

98:	mov	PT_GS(%esp), %gs
	.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	_ASM_EXTABLE(98b, 99b)

.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */
.macro SAVE_ALL pt_regs_ax=%eax
	movl	$(__USER_DS), %edx
	movl	$(__KERNEL_PERCPU), %edx

.macro RESTORE_INT_REGS

.macro RESTORE_REGS pop=0
	.pushsection .fixup, "ax"

	GET_THREAD_INFO(%ebp)
	pushl	$0x0202				# Reset kernel eflags

	/* When we fork, we trace the syscall return in the child, too. */
	call	syscall_return_slowpath

ENTRY(ret_from_kernel_thread)
	GET_THREAD_INFO(%ebp)
	pushl	$0x0202				# Reset kernel eflags
	movl	PT_EBP(%esp), %eax
	movl	$0, PT_EAX(%esp)

	/*
	 * Kernel threads return to userspace as if returning from a syscall.
	 * We should check whether anything actually uses this path and, if so,
	 * consider switching it over to ret_from_fork.
	 */
	call	syscall_return_slowpath
ENDPROC(ret_from_kernel_thread)
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)
	GET_THREAD_INFO(%ebp)

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax

	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	call	prepare_exit_to_usermode
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)

	# preempt count == 0 + NEED_RESCHED set?
	cmpl	$0, PER_CPU_VAR(__preempt_count)
#ifndef CONFIG_PREEMPT_LAZY

	# at least preempt count == 0 ?
	cmpl	$_PREEMPT_ENABLED, PER_CPU_VAR(__preempt_count)

	cmpl	$0, TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?

	testl	$_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)

	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call	preempt_schedule_irq
	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
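# Sketch of the pt_regs frame just built (offsets symbolic; the GP
# registers and segments at the bottom come from SAVE_ALL):
#
#	PT_OLDSS	__USER_DS
#	PT_OLDESP	user %esp (came in stashed in %ebp)
#	PT_EFLAGS	user flags, with IF forced back on
#	PT_CS		__USER_CS
#	PT_EIP		0 for now; the C slow path fills in the return ip
#	PT_ORIG_EAX	syscall number (%eax at entry)
#	PT_EAX...	-ENOSYS in ax, the rest saved by SAVE_ALL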
	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Return to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	ENABLE_INTERRUPTS_SYSEXIT
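# (SYSEXIT itself resumes userspace with %eip = %edx and %esp = %ecx;
# that is why PT_EIP/PT_OLDESP were loaded into %edx/%ecx above, and
# why the real user %ecx/%edx must be restored by the vDSO instead.)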
	.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on. Unlike the 64-bit
	 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
	 * are already on (unless user code is messing around with iopl).
	 */
	call	do_syscall_32_irqs_on
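# For reference, a hypothetical userspace caller that lands at
# entry_INT80_32 above (illustration only, with made-up buffer symbols):
#
#	movl	$4, %eax		# __NR_write
#	movl	$1, %ebx		# fd 1 (stdout)
#	movl	$buf, %ecx		# user buffer
#	movl	$buflen, %edx		# byte count
#	int	$0x80			# trap gate: IF stays set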
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss			# returning to user-space with LDT SS

	RESTORE_REGS 4			# skip orig_eax/error_code
.section .fixup, "ax"
	pushl	$0			# no error code
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16-bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
 */
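# Worked example with made-up numbers: say the kernel %esp here is
# 0x8765c000 and the user %esp is 0x12345678.  The "new kernel esp"
# built below is 0x1234c000 (user high word, kernel low word), and the
# ESPFIX segment base is patched to the difference 0x8765c000 -
# 0x1234c000 = 0x75310000, so base + new esp still addresses the
# kernel stack, while iret can only lose the now-correct high word.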
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */

	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
ENDPROC(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack:
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
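# Continuing the example above: the two GDT bytes reassemble the
# patched base's high word (0x7531), the shift makes it 0x75310000,
# and adding the espfix-relative %esp (0x1234c0xx) recovers the flat
# 0x8765c0xx kernel pointer, which lss then reloads together with the
# zero-based __KERNEL_DS stack segment.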
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	movl	$__KERNEL_DS, %eax
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
ENDPROC(common_interrupt)
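# Worked example of the vector encoding: for vector 0x20 the stub
# pushes ~0x20 + 0x80 = 0x5f, which fits the sign-extended-byte form
# of pushl and keeps every stub within its 8-byte slot.  The
# addl $-0x80 above turns 0x5f back into ~0x20 (0xffffffdf), and the
# common handler recovers 0x20 by complementing it once more.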
#define BUILD_INTERRUPT3(name, nr, fn)	\

#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	pushl	$do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl $do_general_protection",	\
		    "pushl $do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
END(device_not_available)

#ifdef CONFIG_PARAVIRT
	_ASM_EXTABLE(native_iret, iret_exc)

ENTRY(native_irq_enable_sysexit)
END(native_irq_enable_sysexit)

ENTRY(coprocessor_segment_overrun)
	pushl	$do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	pushl	$do_invalid_TSS

ENTRY(segment_not_present)
	pushl	$do_segment_not_present
END(segment_not_present)

	pushl	$do_stack_segment

ENTRY(alignment_check)
	pushl	$do_alignment_check

	pushl	$0				# no error code
	pushl	$do_divide_error

#ifdef CONFIG_X86_MCE
	pushl	machine_check_vector

ENTRY(spurious_interrupt_bug)
	pushl	$do_spurious_interrupt_bug
END(spurious_interrupt_bug)

/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	cmpl	$xen_iret_end_crit, %eax
	jmp	xen_iret_crit_fixup

	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	jmp	ret_from_exception
.section .fixup, "ax"
ENDPROC(xen_failsafe_callback)
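# The category 1 fixups in the elided lines above follow the same
# extable idiom as the 98/99 labels in the GS macros earlier in this
# file; a minimal sketch of the assumed shape:
#
#	1:	mov	PT_GS(%esp), %gs	# may fault on a bad selector
#	.pushsection .fixup, "ax"
#	2:	movl	$0, PT_GS(%esp)		# zero the slot, retry from 1:
#		jmp	1b
#	.popsection
#	_ASM_EXTABLE(1b, 2b)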
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
	pushl	$0				/* Pass NULL as regs pointer */
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

	addl	$4, %esp			/* skip NULL pointer */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call

/* This is weak to keep gas from relaxing the jumps */

ENTRY(ftrace_regs_caller)
	pushf					/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
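# Sketch of the shuffle this achieves (offsets as used below, once the
# full pt_regs frame is in place):
#
#	12*4(%esp)  regs->ip	= return ip into the traced function
#	13*4(%esp)  regs->cs	= __KERNEL_CS (the entry flags were
#				  parked here by the pushf above)
#	14*4(%esp)  regs->flags	= entry flags, overwriting the spare
#				  copy of the return ip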
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */

#else /* ! CONFIG_DYNAMIC_FTRACE */

	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller

	/* taken from glibc */
	subl	$MCOUNT_INSN_SIZE, %eax
	call	*ftrace_trace_function

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
END(ftrace_graph_caller)

.globl return_to_handler
	call	ftrace_return_to_handler

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	pushl	$trace_do_page_fault
END(trace_page_fault)

	/* the function address is in %gs's slot on the stack */
	movl	$(__KERNEL_PERCPU), %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	movl	$(__USER_DS), %ecx
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
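# Example of the offsets used with this macro: a debug trap on the
# SYSENTER instruction has pushed one 12-byte hardware frame (eflags,
# cs, eip), hence "FIX_STACK 12" below; an NMI that hits while that
# debug trap is still setting up sees two stacked hardware frames,
# hence "FIX_STACK 24" in the NMI path further down.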
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushl	$sysenter_past_esp

	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int

	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */

#ifdef CONFIG_X86_ESPFIX32
	cmpw	$__ESPFIX_SS, %ax
	cmpl	$entry_SYSENTER_32, (%esp)

	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
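# Example of that range check (illustrative THREAD_SIZE = 8K): the
# andl keeps only %esp's offset within its stack area, and the
# nmi_debug_stack_check path below peeks as far as 16(%esp) + 4 = 20
# bytes up, so any offset at or above THREAD_SIZE-20 = 0x1fec must
# bail out rather than risk touching an unmapped page past the stack.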
nmi_stack_correct:
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	restore_all_notrace

	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * create the pointer to lss back
	 */
	/* copy the iret frame of 12 bytes */
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	lss	12+4(%esp), %esp		# back to espfix stack

	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception

ENTRY(general_protection)
	pushl	$do_general_protection
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl	$do_async_page_fault
END(async_page_fault)