These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/arch/x86/mm/fault.c b/kernel/arch/x86/mm/fault.c
index 9dc9098..e830c71 100644
--- a/kernel/arch/x86/mm/fault.c
+++ b/kernel/arch/x86/mm/fault.c
@@ -20,6 +20,7 @@
 #include <asm/kmemcheck.h>             /* kmemcheck_*(), ...           */
 #include <asm/fixmap.h>                        /* VSYSCALL_ADDR                */
 #include <asm/vsyscall.h>              /* emulate_vsyscall             */
+#include <asm/vm86.h>                  /* struct vm86                  */
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
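
The new <asm/vm86.h> include is needed because check_v8086_mode(), reworked in the third hunk below, now dereferences struct vm86, which is declared in that header.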
@@ -286,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address)
        if (!pmd_k)
                return -1;
 
+       if (pmd_huge(*pmd_k))
+               return 0;
+
        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;
@@ -301,14 +305,16 @@ static inline void
 check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
 {
+#ifdef CONFIG_VM86
        unsigned long bit;
 
-       if (!v8086_mode(regs))
+       if (!v8086_mode(regs) || !tsk->thread.vm86)
                return;
 
        bit = (address - 0xA0000) >> PAGE_SHIFT;
        if (bit < 32)
-               tsk->thread.screen_bitmap |= 1 << bit;
+               tsk->thread.vm86->screen_bitmap |= 1 << bit;
+#endif
 }
 
 static bool low_pfn(unsigned long pfn)
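
check_v8086_mode() records which pages of the legacy VGA window a vm86 task has touched. The screen_bitmap has moved from thread_struct into the separately allocated struct vm86, which exists only once a task has actually entered vm86 mode, hence the extra NULL check, and the helper now compiles away entirely when CONFIG_VM86 is not set. The bitmap covers 32 pages starting at 0xA0000, i.e. the 128 KiB range 0xA0000-0xBFFFF; a sketch of the bookkeeping with the arithmetic spelled out (assuming 4 KiB pages):

        /* bit = (address - 0xA0000) >> PAGE_SHIFT
         *   0xA0000 -> bit 0, 0xA1000 -> bit 1, ..., 0xBF000 -> bit 31;
         * anything at or above 0xC0000 gives bit >= 32 and is ignored.
         */
        if (!v8086_mode(regs) || !tsk->thread.vm86)     /* vm86 state is allocated lazily */
                return;
        bit = (address - 0xA0000) >> PAGE_SHIFT;
        if (bit < 32)
                tsk->thread.vm86->screen_bitmap |= 1 << bit;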
@@ -357,8 +363,6 @@ void vmalloc_sync_all(void)
  * 64-bit:
  *
  *   Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
  */
 static noinline int vmalloc_fault(unsigned long address)
 {
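
The next hunk adjusts the 64-bit walk for the same reason the "no large pages in there" assumption above is dropped. At each level the live entry is checked against the reference (init_mm) entry before descending; pud_page_vaddr()/pmd_page() treat the entry as a pointer to a lower-level table, which is not meaningful for a huge mapping, whereas comparing page frame numbers with pud_pfn()/pmd_pfn() works for both table and huge entries. The pud_huge()/pmd_huge() early returns then stop the walk before pte_offset_kernel() could misread a huge entry as a page table. In sketch form, with comments added for this note:

        if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
                BUG();          /* process copy disagrees with init_mm: page tables corrupted */
        if (pud_huge(*pud))
                return 0;       /* 1 GiB mapping already covers the address */

        pmd = pmd_offset(pud, address);
        pmd_ref = pmd_offset(pud_ref, address);
        if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
                BUG();
        if (pmd_huge(*pmd))
                return 0;       /* 2 MiB mapping already covers the address */

        pte_ref = pte_offset_kernel(pmd_ref, address);  /* safe: the PMD is a table entry here */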
@@ -400,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address)
        if (pud_none(*pud_ref))
                return -1;
 
-       if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+       if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
                BUG();
 
+       if (pud_huge(*pud))
+               return 0;
+
        pmd = pmd_offset(pud, address);
        pmd_ref = pmd_offset(pud_ref, address);
        if (pmd_none(*pmd_ref))
                return -1;
 
-       if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+       if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
                BUG();
 
+       if (pmd_huge(*pmd))
+               return 0;
+
        pte_ref = pte_offset_kernel(pmd_ref, address);
        if (!pte_present(*pte_ref))
                return -1;