Upgrade to 4.4.50-rt62
diff --git a/kernel/arch/sparc/mm/tlb.c b/kernel/arch/sparc/mm/tlb.c
index 9df2190..3659d37 100644
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-                             bool exec)
+                             bool exec, bool huge)
 {
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;
@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
        }
 
        if (!tb->active) {
-               flush_tsb_user_page(mm, vaddr);
+               flush_tsb_user_page(mm, vaddr, huge);
                global_flush_tlb_page(mm, vaddr);
                goto out;
        }
 
-       if (nr == 0)
+       if (nr == 0) {
                tb->mm = mm;
+               tb->huge = huge;
+       }
+
+       if (tb->huge != huge) {
+               flush_tlb_pending();
+               tb->huge = huge;
+               nr = 0;
+       }
 
        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
@@ -104,6 +112,8 @@ out:
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm)
 {
+       bool huge = is_hugetlb_pte(orig);
+
        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
        if (!fullmm)
-               tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+               tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                if (pte_val(*pte) & _PAGE_VALID) {
                        bool exec = pte_exec(*pte);
 
-                       tlb_batch_add_one(mm, vaddr, exec);
+                       tlb_batch_add_one(mm, vaddr, exec, false);
                }
                pte++;
                vaddr += PAGE_SIZE;
@@ -165,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
        if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
                if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-                       mm->context.huge_pte_count++;
+                       mm->context.thp_pte_count++;
                else
-                       mm->context.huge_pte_count--;
+                       mm->context.thp_pte_count--;
 
                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                        pte_t orig_pte = __pte(pmd_val(orig));
                        bool exec = pte_exec(orig_pte);
 
-                       tlb_batch_add_one(mm, addr, exec);
-                       tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+                       tlb_batch_add_one(mm, addr, exec, true);
+                       tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+                                       true);
                } else {
                        tlb_batch_pmd_scan(mm, addr, orig);
                }