#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif  /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;
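
/*
 * CR4.PCE gates the user-space RDPMC instruction.  Roughly: perf bumps
 * mm->context.perf_rdpmc_allowed when a counter is mapped for user
 * self-monitoring, and switch_mm() applies the incoming mm's policy via
 * load_mm_cr4().
 */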
static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct *entries;
        int size;
};
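
/*
 * Write-side sketch (modify_ldt() in arch/x86/kernel/ldt.c): a new
 * ldt_struct is allocated and filled, published with
 * smp_store_release(&mm->context.ldt, new_ldt), every CPU running the mm
 * is IPI'd so it reloads via load_mm_ldt(), and only then is the old
 * ldt_struct freed.  Readers therefore never observe a half-updated LDT.
 */
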
static inline void load_mm_ldt(struct mm_struct *mm)
{
        struct ldt_struct *ldt;

        /* lockless_dereference synchronizes with smp_store_release */
        ldt = lockless_dereference(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */

        if (unlikely(ldt))
                set_ldt(ldt->entries, ldt->size);
        else
                clear_LDT();

        DEBUG_LOCKS_WARN_ON(preemptible());
}

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
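
/*
 * Both are implemented in arch/x86/kernel/ldt.c: init_new_context()
 * duplicates the parent's ldt_struct (if any) into the child mm at fork(),
 * and destroy_context() frees it when the mm is torn down.
 */
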
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
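
/*
 * switch_mm() is invoked from the scheduler's context_switch() with
 * interrupts disabled, and via activate_mm() below.  prev != next is a
 * real address-space switch; prev == next means this CPU was in lazy TLB
 * mode and may only need to rejoin mm_cpumask() and reload its state,
 * because leave_mm() had stopped flush IPIs for it.
 */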
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
#ifdef CONFIG_SMP
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /* Re-load page tables */
                load_cr3(next->pgd);
                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register.  This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never set context.ldt to NULL while the mm still
                 * exists.  That means that next->context.ldt !=
                 * prev->context.ldt, because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_mm_ldt(next);
        }
#ifdef CONFIG_SMP
        else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here. Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));
                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_mm_ldt(next);
                }
        }
#endif
}
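
/*
 * activate_mm() is used on the exec() path (exec_mmap()), where a freshly
 * built mm is installed: paravirt gets to hook the activation, then the
 * switch itself is an ordinary switch_mm().
 */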
#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
}
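
/*
 * The two hooks below exist for Intel MPX: arch_bprm_mm_init() resets the
 * per-mm bounds-directory state when a new binary is exec'ed, and
 * arch_unmap() lets MPX free bounds tables that covered the range being
 * unmapped.
 */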

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */