/*
 *  arch/arm/include/asm/thread_info.h
 *
 *  Copyright (C) 2002 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/fpstate.h>
#include <asm/page.h>

#define THREAD_SIZE_ORDER       1
#define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP         (THREAD_SIZE - 8)
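
/*
 * Layout sketch (editorial illustration, not part of the upstream header):
 * assuming the usual 4 KiB PAGE_SIZE, THREAD_SIZE_ORDER == 1 makes the
 * combined thread_info + kernel stack allocation
 *
 *     THREAD_SIZE = PAGE_SIZE << 1 = 8192 bytes
 *
 * with thread_info at the bottom and the kernel stack growing downwards
 * from THREAD_START_SP:
 *
 *     base + 0x0000               struct thread_info
 *     ...                         kernel stack (grows down)
 *     base + 0x1ff8               initial SP (THREAD_SIZE - 8)
 */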

#ifndef __ASSEMBLY__

struct task_struct;

#include <asm/types.h>
#include <asm/domain.h>

typedef unsigned long mm_segment_t;

struct cpu_context_save {
        __u32   r4;
        __u32   r5;
        __u32   r6;
        __u32   r7;
        __u32   r8;
        __u32   r9;
        __u32   sl;
        __u32   fp;
        __u32   sp;
        __u32   pc;
        __u32   extra[2];               /* Xscale 'acc' register, etc */
};

/*
 * low level task data that entry.S needs immediate access to.
 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
 */
struct thread_info {
        unsigned long           flags;          /* low level flags */
        int                     preempt_count;  /* 0 => preemptable, <0 => bug */
        int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
        mm_segment_t            addr_limit;     /* address limit */
        struct task_struct      *task;          /* main task structure */
        __u32                   cpu;            /* cpu */
        __u32                   cpu_domain;     /* cpu domain */
        struct cpu_context_save cpu_context;    /* cpu context */
        __u32                   syscall;        /* syscall number */
        __u8                    used_cp[16];    /* thread used copro */
        unsigned long           tp_value[2];    /* TLS registers */
#ifdef CONFIG_CRUNCH
        struct crunch_state     crunchstate;
#endif
        union fp_state          fpstate __attribute__((aligned(8)));
        union vfp_state         vfpstate;
#ifdef CONFIG_ARM_THUMBEE
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
#endif
};
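
/*
 * Usage sketch (editorial illustration, not part of the upstream header):
 * another task's thread_info is normally reached via the generic
 * task_thread_info() accessor from <linux/sched.h>, e.g.
 *
 *     struct thread_info *ti = task_thread_info(tsk);
 *     unsigned int cpu = ti->cpu;             (CPU the task runs on)
 *     int count = ti->preempt_count;          (0 => preemptable)
 *
 * Also note that __switch_to() depends on cpu_context sitting directly
 * after cpu_domain, so the field order above must not be changed casually.
 */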

#define INIT_THREAD_INFO(tsk)                                           \
{                                                                       \
        .task           = &tsk,                                         \
        .flags          = 0,                                            \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
        .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |     \
                          domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |   \
                          domain_val(DOMAIN_IO, DOMAIN_CLIENT),         \
}
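
/*
 * Illustrative note (editorial, not part of the upstream header):
 * INIT_THREAD_INFO() initialises the boot task's thread_info; generic
 * code uses it roughly as in init/init_task.c:
 *
 *     union thread_union init_thread_union __init_task_data =
 *             { INIT_THREAD_INFO(init_task) };
 */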

#define init_thread_info        (init_thread_union.thread_info)
#define init_stack              (init_thread_union.stack)

/*
 * how to get the current stack pointer in C
 */
register unsigned long current_stack_pointer asm ("sp");

/*
 * how to get the thread information struct from C
 */
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
        return (struct thread_info *)
                (current_stack_pointer & ~(THREAD_SIZE - 1));
}
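
/*
 * Illustrative note (editorial, not part of the upstream header): the
 * masking works because the thread_info/stack allocation is THREAD_SIZE
 * bytes long and THREAD_SIZE aligned, so clearing the low bits of any
 * address within the current kernel stack yields the base of that
 * allocation, where thread_info lives.  With an 8 KiB THREAD_SIZE and a
 * hypothetical stack pointer:
 *
 *     sp                    = 0xc1a35e70
 *     sp & ~(THREAD_SIZE-1) = 0xc1a34000   (this task's thread_info)
 */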

#define thread_saved_pc(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))

#ifndef CONFIG_THUMB2_KERNEL
#define thread_saved_fp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
#else
#define thread_saved_fp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
#endif

extern void crunch_task_disable(struct thread_info *);
extern void crunch_task_copy(struct thread_info *, void *);
extern void crunch_task_restore(struct thread_info *, void *);
extern void crunch_task_release(struct thread_info *);

extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);

extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);

struct user_vfp;
struct user_vfp_exc;

extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
                                           struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
                                    struct user_vfp_exc __user *);
#endif

/*
 * thread information flags:
 *  TIF_SYSCALL_TRACE   - syscall trace active
 *  TIF_SYSCALL_AUDIT   - syscall auditing active
 *  TIF_SIGPENDING      - signal pending
 *  TIF_NEED_RESCHED    - rescheduling necessary
 *  TIF_NEED_RESCHED_LAZY - lazy rescheduling necessary (PREEMPT_RT)
 *  TIF_NOTIFY_RESUME   - callback before returning to user
 *  TIF_UPROBE          - uprobe handling pending on return to user
 *  TIF_SECCOMP         - seccomp syscall filtering active
 *  TIF_NOHZ            - in adaptive nohz mode
 *  TIF_USEDFPU         - FPU was used by this task this quantum (SMP)
 *  TIF_POLLING_NRFLAG  - true if poll_idle() is polling TIF_NEED_RESCHED
 */
#define TIF_SIGPENDING          0
#define TIF_NEED_RESCHED        1
#define TIF_NOTIFY_RESUME       2       /* callback before returning to user */
#define TIF_NEED_RESCHED_LAZY   3
#define TIF_UPROBE              7
#define TIF_SYSCALL_TRACE       8
#define TIF_SYSCALL_AUDIT       9
#define TIF_SYSCALL_TRACEPOINT  10
#define TIF_SECCOMP             11      /* seccomp syscall filtering active */
#define TIF_NOHZ                12      /* in adaptive nohz mode */
#define TIF_USING_IWMMXT        17
#define TIF_MEMDIE              18      /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK     20

#define _TIF_SIGPENDING         (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME      (1 << TIF_NOTIFY_RESUME)
#define _TIF_NEED_RESCHED_LAZY  (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_UPROBE             (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT       (1 << TIF_USING_IWMMXT)

/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
                           _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)

/*
 * Change these and you break ASM code in entry-common.S
 */
#define _TIF_WORK_MASK          (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
                                 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
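
/*
 * Usage sketch (editorial illustration, not part of the upstream header):
 * the TIF_* values are bit numbers tested from C with the generic
 * thread-flag helpers, while the _TIF_* masks are what the assembly
 * return paths compare against, roughly:
 *
 *     if (test_thread_flag(TIF_SIGPENDING))
 *             handle_pending_signal();         (hypothetical helper)
 *
 *     if (thread_flags & _TIF_WORK_MASK)
 *             take_slow_work_path();           (hypothetical helper)
 */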

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */