Code Review
/
kvmfornfv.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
Upgrade to 4.4.50-rt62
[kvmfornfv.git]
/
kernel
/
arch
/
x86
/
include
/
asm
/
tlbflush.h
diff --git a/kernel/arch/x86/include/asm/tlbflush.h b/kernel/arch/x86/include/asm/tlbflush.h
index 6df2029..6433e28 100644 (file)
--- a/kernel/arch/x86/include/asm/tlbflush.h
+++ b/kernel/arch/x86/include/asm/tlbflush.h
@@ -32,7 +32,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+	this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
 }
 
 /* Set in this cpu's CR4. */
@@ -86,7 +86,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 static inline void __native_flush_tlb(void)
 {
+	/*
+	 * If current->mm == NULL then we borrow a mm which may change during a
+	 * task switch and therefore we must not be preempted while we write CR3
+	 * back:
+	 */
+	preempt_disable();
 	native_write_cr3(native_read_cr3());
+	preempt_enable();
 }
 
 static inline void __native_flush_tlb_global_irq_disabled(void)