These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/arch/arm/include/asm/thread_info.h
/*
 *  arch/arm/include/asm/thread_info.h
 *
 *  Copyright (C) 2002 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/fpstate.h>
#include <asm/page.h>

#define THREAD_SIZE_ORDER       1
#define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP         (THREAD_SIZE - 8)
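/*
 * Worked example (illustrative only; assumes the usual 4 KiB PAGE_SIZE):
 *
 *   THREAD_SIZE     = 4096 << 1 = 8192 bytes (two pages per kernel stack)
 *   THREAD_START_SP = 8192 - 8  = 8184 (initial SP sits 8 bytes below the top)
 */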

#ifndef __ASSEMBLY__

struct task_struct;

#include <asm/types.h>

typedef unsigned long mm_segment_t;

struct cpu_context_save {
        __u32   r4;
        __u32   r5;
        __u32   r6;
        __u32   r7;
        __u32   r8;
        __u32   r9;
        __u32   sl;
        __u32   fp;
        __u32   sp;
        __u32   pc;
        __u32   extra[2];               /* Xscale 'acc' register, etc */
};

/*
 * low level task data that entry.S needs immediate access to.
 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
 */
struct thread_info {
        unsigned long           flags;          /* low level flags */
        int                     preempt_count;  /* 0 => preemptable, <0 => bug */
        int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
        mm_segment_t            addr_limit;     /* address limit */
        struct task_struct      *task;          /* main task structure */
        __u32                   cpu;            /* cpu */
        __u32                   cpu_domain;     /* cpu domain */
        struct cpu_context_save cpu_context;    /* cpu context */
        __u32                   syscall;        /* syscall number */
        __u8                    used_cp[16];    /* thread used copro */
        unsigned long           tp_value[2];    /* TLS registers */
#ifdef CONFIG_CRUNCH
        struct crunch_state     crunchstate;
#endif
        union fp_state          fpstate __attribute__((aligned(8)));
        union vfp_state         vfpstate;
#ifdef CONFIG_ARM_THUMBEE
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
#endif
};
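/*
 * Note: preempt_lazy_count above (and TIF_NEED_RESCHED_LAZY below) come
 * from the PREEMPT_RT "lazy preemption" patch applied in this -rt14 tree;
 * they are not present in the vanilla 4.4 header.
 */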

#define INIT_THREAD_INFO(tsk)                                           \
{                                                                       \
        .task           = &tsk,                                         \
        .flags          = 0,                                            \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
}

#define init_thread_info        (init_thread_union.thread_info)
#define init_stack              (init_thread_union.stack)
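/*
 * Sketch of how the initializer is typically consumed (the exact code
 * lives outside this header, in the generic init code), roughly:
 *
 *   union thread_union init_thread_union __init_task_data =
 *           { INIT_THREAD_INFO(init_task) };
 *
 * init_thread_info and init_stack then simply alias into that union.
 */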

/*
 * how to get the current stack pointer in C
 */
register unsigned long current_stack_pointer asm ("sp");

/*
 * how to get the thread information struct from C
 */
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
        return (struct thread_info *)
                (current_stack_pointer & ~(THREAD_SIZE - 1));
}
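/*
 * Worked example of the mask above (assuming THREAD_SIZE == 8192):
 * thread_info sits at the bottom of the kernel stack, so rounding the
 * stack pointer down to a THREAD_SIZE boundary locates it:
 *
 *   sp                  = 0xc7a51e58   (hypothetical value)
 *   ~(THREAD_SIZE - 1)  = 0xffffe000
 *   sp & 0xffffe000     = 0xc7a50000   -> struct thread_info *
 */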

#define thread_saved_pc(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))

#ifndef CONFIG_THUMB2_KERNEL
#define thread_saved_fp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
#else
#define thread_saved_fp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
#endif
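/*
 * Note: Thumb-2 code conventionally uses r7 as the frame pointer rather
 * than r11 (fp), hence the r7 variant of thread_saved_fp() above. These
 * accessors are only meaningful for tasks that are not currently running,
 * since cpu_context is filled in by __switch_to().
 */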

extern void crunch_task_disable(struct thread_info *);
extern void crunch_task_copy(struct thread_info *, void *);
extern void crunch_task_restore(struct thread_info *, void *);
extern void crunch_task_release(struct thread_info *);

extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);

extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);

struct user_vfp;
struct user_vfp_exc;

extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
                                           struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
                                    struct user_vfp_exc __user *);
#endif

/*
 * thread information flags:
 *  TIF_USEDFPU         - FPU was used by this task this quantum (SMP)
 *  TIF_POLLING_NRFLAG  - true if poll_idle() is polling TIF_NEED_RESCHED
 */
#define TIF_SIGPENDING          0       /* signal pending */
#define TIF_NEED_RESCHED        1       /* rescheduling necessary */
#define TIF_NOTIFY_RESUME       2       /* callback before returning to user */
#define TIF_UPROBE              3       /* breakpointed or singlestepping */
#define TIF_SYSCALL_TRACE       4       /* syscall trace active */
#define TIF_SYSCALL_AUDIT       5       /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
#define TIF_SECCOMP             8       /* seccomp syscall filtering active */
#define TIF_NEED_RESCHED_LAZY   7

#define TIF_NOHZ                12      /* in adaptive nohz mode */
#define TIF_USING_IWMMXT        17
#define TIF_MEMDIE              18      /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK     20

#define _TIF_SIGPENDING         (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME      (1 << TIF_NOTIFY_RESUME)
#define _TIF_NEED_RESCHED_LAZY  (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_UPROBE             (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT       (1 << TIF_USING_IWMMXT)

/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
                           _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)

/*
 * Change these and you break ASM code in entry-common.S
 */
#define _TIF_WORK_MASK          (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
                                 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
                                 _TIF_NEED_RESCHED_LAZY)
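/*
 * Worked example of the mask composition (values follow from the TIF_*
 * bit numbers above):
 *
 *   _TIF_WORK_MASK = (1<<1) | (1<<0) | (1<<2) | (1<<3) | (1<<7) = 0x8f
 *
 * The return-to-user path can then check for all pending work with a
 * single AND of thread_info->flags against this mask.
 */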

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */