These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/arch/x86/include/asm/thread_info.h b/kernel/arch/x86/include/asm/thread_info.h
index 606144a..ddb63bd 100644
--- a/kernel/arch/x86/include/asm/thread_info.h
+++ b/kernel/arch/x86/include/asm/thread_info.h
  * Without this offset, that can result in a page fault.  (We are
  * careful that, in this case, the value we read doesn't matter.)
  *
- * In vm86 mode, the hardware frame is much longer still, but we neither
- * access the extra members from NMI context, nor do we write such a
- * frame at sp0 at all.
+ * In vm86 mode, the hardware frame is much longer still, so add 16
+ * bytes to make room for the real-mode segments.
  *
  * x86_64 has a fixed-length stack frame.
  */
 #ifdef CONFIG_X86_32
-# define TOP_OF_KERNEL_STACK_PADDING 8
+# ifdef CONFIG_VM86
+#  define TOP_OF_KERNEL_STACK_PADDING 16
+# else
+#  define TOP_OF_KERNEL_STACK_PADDING 8
+# endif
 #else
 # define TOP_OF_KERNEL_STACK_PADDING 0
 #endif
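
The padding value feeds the usual top-of-stack arithmetic: the hardware frame (struct pt_regs) is placed TOP_OF_KERNEL_STACK_PADDING bytes below the very top of the kernel stack, which is what leaves room for the extra real-mode segment slots in vm86 mode. Below is a minimal user-space sketch of that placement; THREAD_SIZE, the pt_regs size and the stack base are invented values for illustration, not taken from this tree.

#include <stdio.h>

#define THREAD_SIZE     (2 * 4096)      /* assumed 8 KiB kernel stack */
#define PT_REGS_SIZE    68              /* assumed size of a 32-bit struct pt_regs */

/* Mirrors the placement done by the kernel's task_pt_regs(): the hardware
 * frame starts TOP_OF_KERNEL_STACK_PADDING bytes below the stack top. */
static unsigned long pt_regs_addr(unsigned long stack_base, unsigned int padding)
{
        return stack_base + THREAD_SIZE - padding - PT_REGS_SIZE;
}

int main(void)
{
        unsigned long base = 0xc1000000UL;      /* arbitrary example stack base */

        printf("padding 8  (no vm86): pt_regs at %#lx\n", pt_regs_addr(base, 8));
        printf("padding 16 (vm86)   : pt_regs at %#lx\n", pt_regs_addr(base, 16));
        return 0;
}
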
@@ -54,11 +57,9 @@ struct thread_info {
        __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
-       int                     saved_preempt_count;
-       int                     preempt_lazy_count;     /* 0 => lazy preemptable
-                                                        <0  => BUG */
        mm_segment_t            addr_limit;
-       void __user             *sysenter_return;
+       int                     preempt_lazy_count;     /* 0 => lazy preemptable
+                                                         <0 => BUG */
        unsigned int            sig_on_uaccess_error:1;
        unsigned int            uaccess_err:1;  /* uaccess failed */
 };
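
Field moves are hard to follow in diff form, so here is how the struct reads once the hunk applies, reconstructed only from the context and '+' lines above. The leading task pointer is inferred from the .task initializer below, and the kernel typedefs are stubbed so the sketch compiles on its own.

typedef unsigned int __u32;                     /* stand-ins for kernel types */
typedef unsigned long mm_segment_t;
struct task_struct;                             /* opaque here */

struct thread_info {
        struct task_struct      *task;          /* inferred from .task = &tsk below */
        __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
        mm_segment_t            addr_limit;
        int                     preempt_lazy_count;     /* 0 => lazy preemptable,
                                                           <0 => BUG */
        unsigned int            sig_on_uaccess_error:1;
        unsigned int            uaccess_err:1;  /* uaccess failed */
};
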
@@ -68,7 +69,6 @@ struct thread_info {
        .task           = &tsk,                 \
        .flags          = 0,                    \
        .cpu            = 0,                    \
-       .saved_preempt_count = INIT_PREEMPT_COUNT,      \
        .addr_limit     = KERNEL_DS,            \
 }
 
@@ -144,27 +144,11 @@ struct thread_info {
         _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT |     \
         _TIF_NOHZ)
 
-/* work to do in syscall_trace_leave() */
-#define _TIF_WORK_SYSCALL_EXIT \
-       (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP |    \
-        _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
-
-/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK                                                 \
-       (0x0000FFFF &                                                   \
-        ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|                       \
-          _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
-
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK                                              \
        ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |       \
        _TIF_NOHZ)
 
-/* Only used for 64 bit */
-#define _TIF_DO_NOTIFY_MASK                                            \
-       (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |                         \
-        _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
-
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
        (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
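
The surviving masks follow the usual pattern: each _TIF_* constant is 1 << TIF_*, composite masks are ORed together, and callers test them against thread_info->flags. A small stand-alone sketch of that pattern follows; the bit numbers and the mask name are placeholders, not the kernel's definitions.

#include <stdio.h>

/* Placeholder bit numbers -- not necessarily the kernel's TIF_* values. */
#define TIF_SIGPENDING          2
#define TIF_NEED_RESCHED        3
#define _TIF_SIGPENDING         (1u << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED       (1u << TIF_NEED_RESCHED)

/* Composite mask, built the same way as _TIF_ALLWORK_MASK above. */
#define _EXAMPLE_WORK_MASK      (_TIF_SIGPENDING | _TIF_NEED_RESCHED)

int main(void)
{
        unsigned int flags = _TIF_SIGPENDING;   /* pretend a signal is pending */

        if (flags & _EXAMPLE_WORK_MASK)
                printf("work pending before returning to user space\n");
        return 0;
}
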
@@ -183,8 +167,6 @@ struct thread_info {
  */
 #ifndef __ASSEMBLY__
 
-DECLARE_PER_CPU(unsigned long, kernel_stack);
-
 static inline struct thread_info *current_thread_info(void)
 {
        return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
@@ -203,9 +185,13 @@ static inline unsigned long current_stack_pointer(void)
 
 #else /* !__ASSEMBLY__ */
 
+#ifdef CONFIG_X86_64
+# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+#endif
+
 /* Load thread_info address into "reg" */
 #define GET_THREAD_INFO(reg) \
-       _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+       _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
        _ASM_SUB $(THREAD_SIZE),reg ;
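
GET_THREAD_INFO now mirrors the C-side current_thread_info() above: read the recorded top of the kernel stack (the per-cpu cpu_current_top_of_stack, aliased to cpu_tss + TSS_sp0 for 64-bit assembly) and step back THREAD_SIZE bytes to reach thread_info at the stack's lowest address. A user-space sketch of that arithmetic, with invented values:

#include <stdio.h>

#define THREAD_SIZE (2 * 4096)          /* assumed 8 KiB kernel stack */

/* C equivalent of the GET_THREAD_INFO sequence above. */
static unsigned long thread_info_addr(unsigned long top_of_stack)
{
        return top_of_stack - THREAD_SIZE;
}

int main(void)
{
        unsigned long top = 0xc0600000UL;       /* invented example value */

        printf("top of stack : %#lx\n", top);
        printf("thread_info  : %#lx\n", thread_info_addr(top));
        return 0;
}
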
 
 /*