These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / arch / x86 / entry / thunk_64.S
1 /*
2  * Save registers before calling assembly functions. This avoids
3  * disturbance of register allocation in some inline assembly constructs.
4  * Copyright 2001,2002 by Andi Kleen, SuSE Labs.
5  * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
6  * Subject to the GNU public license, v.2. No warranty of any kind.
7  */
8 #include <linux/linkage.h>
9 #include "calling.h"
10 #include <asm/asm.h>
11
12         /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
13         .macro THUNK name, func, put_ret_addr_in_rdi=0
14         .globl \name
15 \name:
16
17         /* this one pushes 9 elems, the next one would be %rIP */
18         pushq %rdi
19         pushq %rsi
20         pushq %rdx
21         pushq %rcx
22         pushq %rax
23         pushq %r8
24         pushq %r9
25         pushq %r10
26         pushq %r11
27
28         .if \put_ret_addr_in_rdi
29         /* 9*8(%rsp) is return addr on stack */
30         movq 9*8(%rsp), %rdi
31         .endif
32
33         call \func
34         jmp  restore
35         _ASM_NOKPROBE(\name)
36         .endm
37
38 #ifdef CONFIG_TRACE_IRQFLAGS
           /*
            * irq-flags tracing entry points.  put_ret_addr_in_rdi=1 so the
            * caller's return address is handed to the *_caller() functions,
            * presumably so the enable/disable site can be recorded — the
            * consumers live outside this file.
            */
39         THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
40         THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
41 #endif
42
43 #ifdef CONFIG_DEBUG_LOCK_ALLOC
           /* Lockdep hook; NOTE(review): invoked from the syscall-exit asm
            * path judging by the name — confirm against entry_64.S. */
44         THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
45 #endif
46
47 #ifdef CONFIG_PREEMPT
           /* Register-preserving wrappers around the preemption entry points,
            * for call sites that must not clobber caller registers. */
48         THUNK ___preempt_schedule, preempt_schedule
49         THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
50 #endif
51
52 #if defined(CONFIG_TRACE_IRQFLAGS) \
53  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
54  || defined(CONFIG_PREEMPT)
           /*
            * Shared epilogue for every thunk emitted above: pop the nine
            * registers in exactly the reverse of THUNK's push order, then
            * return to the thunk's original caller.  Assembled only when at
            * least one THUNK instantiation exists, so the guard must list
            * every config option that emits a thunk.
            */
55 restore:
56         popq %r11
57         popq %r10
58         popq %r9
59         popq %r8
60         popq %rax
61         popq %rcx
62         popq %rdx
63         popq %rsi
64         popq %rdi
65         ret
           /* Keep kprobes off the restore path too — same reason as the thunks. */
66         _ASM_NOKPROBE(restore)
67 #endif