kvmfornfv.git: kernel/arch/x86/include/asm/tlbflush.h
(from the commit "Add the rt linux 4.1.3-rt3 as base")
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

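/*
 * With CONFIG_PARAVIRT the flush primitives come from paravirt ops;
 * otherwise they map directly to the native implementations below.
 */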
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

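/* Per-cpu TLB state, including a software shadow of this CPU's CR4. */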
struct tlb_state {
#ifdef CONFIG_SMP
        struct mm_struct *active_mm;
        int state;
#endif

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of cr4 feature set we're using (e.g.  Pentium 4MB
 * enable and PPro Global page enable), so that any CPU's that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

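/*
 * Set bits in this CPU's CR4 and record them in mmu_cr4_features (and the
 * trampoline copy) so that later-booting CPUs pick up the same flags.
 */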
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

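/* Flush non-global TLB entries by reloading CR3 with its current value. */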
static inline void __native_flush_tlb(void)
{
        native_write_cr3(native_read_cr3());
}

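/* Flush all entries, including global pages; interrupts must already be off. */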
static inline void __native_flush_tlb_global_irq_disabled(void)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* clear PGE */
        native_write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
        unsigned long flags;

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        __native_flush_tlb_global_irq_disabled();

        raw_local_irq_restore(flags);
}

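/* Flush a single page translation using the INVLPG instruction. */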
static inline void __native_flush_tlb_single(unsigned long addr)
{
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

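/* Flush everything: a global flush when PGE is available, a CR3 reload otherwise. */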
static inline void __flush_tlb_all(void)
{
        if (cpu_has_pge)
                __flush_tlb_global();
        else
                __flush_tlb();
}

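/* Flush one address and account for it in the local-flush vm statistics. */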
static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);
}

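/* Passed as the 'end' argument to request a full flush rather than a range. */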
#define TLB_FLUSH_ALL   -1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

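/*
 * Illustrative sketch only (not part of this header): after changing a
 * user page table entry, a caller typically invalidates the stale
 * translation with something like
 *
 *      set_pte_at(vma->vm_mm, addr, ptep, new_pte);
 *      flush_tlb_page(vma, addr);
 *
 * so the old entry for 'addr' is dropped on the CPUs using that mm.
 */
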
#ifndef CONFIG_SMP

/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        __flush_tlb();
}

static inline void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        __flush_tlb_all();
}

static inline void flush_tlb(void)
{
        __flush_tlb_up();
}

static inline void local_flush_tlb(void)
{
        __flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
           unsigned long start, unsigned long end, unsigned long vmflag)
{
        if (mm == current->active_mm)
                __flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        flush_tlb_all();
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)        flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)        \
                flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()     flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end);

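/*
 * Values for cpu_tlbstate.state: TLBSTATE_OK means this CPU's TLB is
 * current for its active_mm; TLBSTATE_LAZY means the CPU is in lazy TLB
 * mode, e.g. running a kernel thread on a borrowed mm.
 */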
#define TLBSTATE_OK     1
#define TLBSTATE_LAZY   2

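/* Return this CPU's lazy TLB tracking to a clean state based on init_mm. */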
static inline void reset_lazy_tlbstate(void)
{
        this_cpu_write(cpu_tlbstate.state, 0);
        this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif  /* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)  \
        native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */