Upgrade to 4.4.50-rt62
diff --git a/kernel/arch/mips/kvm/tlb.c b/kernel/arch/mips/kvm/tlb.c
index aed0ac2..eff71c7 100644
--- a/kernel/arch/mips/kvm/tlb.c
+++ b/kernel/arch/mips/kvm/tlb.c
@@ -152,7 +152,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
 
-       if (kvm_mips_is_error_pfn(pfn)) {
+       if (is_error_noslot_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
                err = -EFAULT;
                goto out;
@@ -276,7 +276,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
        }
 
        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
-       if (gfn >= kvm->arch.guest_pmap_npages) {
+       if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
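
The widened bound check above reflects that the kseg0 fault handler fills a TLB entry pair, i.e. the even and the odd page around badvaddr, so both halves of the pair must fall inside guest_pmap. A minimal standalone sketch of the arithmetic (the helper name is hypothetical, not taken from the kernel sources):

/*
 * Sketch only: gfn | 1 is the larger (odd) gfn of an even/odd page
 * pair, so a single comparison against the pmap size bounds both the
 * even half (gfn & ~1UL) and the odd half (gfn | 1).
 */
static int kseg0_pair_in_range(unsigned long gfn, unsigned long npages)
{
	return (gfn | 1) < npages;
}
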
@@ -361,25 +361,39 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;
-
-       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
-               pfn0 = 0;
-               pfn1 = 0;
-       } else {
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
-                                           >> PAGE_SHIFT];
-               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
-                                           >> PAGE_SHIFT];
+       gfn_t gfn0, gfn1;
+       long tlb_lo[2];
+
+       tlb_lo[0] = tlb->tlb_lo0;
+       tlb_lo[1] = tlb->tlb_lo1;
+
+       /*
+        * The commpage address must not be mapped to anything else if the guest
+        * TLB contains entries nearby, or commpage accesses will break.
+        */
+       if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+                       VPN2_MASK & (PAGE_MASK << 1)))
+               tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+       gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+       gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+       if (gfn0 >= kvm->arch.guest_pmap_npages ||
+           gfn1 >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+                       __func__, gfn0, gfn1, tlb->tlb_hi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               return -1;
        }
 
+       if (kvm_mips_map_page(kvm, gfn0) < 0)
+               return -1;
+
+       if (kvm_mips_map_page(kvm, gfn1) < 0)
+               return -1;
+
+       pfn0 = kvm->arch.guest_pmap[gfn0];
+       pfn1 = kvm->arch.guest_pmap[gfn1];
+
        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;
 
@@ -391,9 +405,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                               kvm_mips_get_kernel_asid(vcpu) :
                                               kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-                  (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+                  (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-                  (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+                  (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
 
        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);
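
The commpage guard added in the previous two hunks compares the guest VPN2 against KVM_GUEST_COMMPAGE_ADDR at pair granularity and, on a match, clears the EntryLo half of the local tlb_lo[] copy that would cover the commpage; the EntryLo values are then built from that sanitized copy rather than from tlb->tlb_lo0/tlb_lo1, so a guest mapping can never shadow the commpage. A rough, self-contained illustration of the masking (the helper and its parameters are assumptions for illustration, not kernel code):

/*
 * Sketch only: VPN2_MASK & (PAGE_MASK << 1) keeps the address bits that
 * select an even/odd page pair.  If tlb_hi and the commpage address
 * agree on those bits, the pair covers the commpage, and bit PAGE_SHIFT
 * of the commpage address says which EntryLo half (0 = even, 1 = odd)
 * must be cleared.
 */
static int commpage_half_to_clear(unsigned long tlb_hi, unsigned long commpage,
				  unsigned long vpn2_mask,
				  unsigned long page_mask, int page_shift)
{
	if ((tlb_hi ^ commpage) & vpn2_mask & (page_mask << 1))
		return -1;	/* pair does not cover the commpage */
	return (commpage >> page_shift) & 1;
}
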
@@ -794,10 +808,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-                                                            &vcpu->arch.
-                                                            guest_tlb[index],
-                                                            NULL, NULL);
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                               &vcpu->arch.guest_tlb[index],
+                                               NULL, NULL)) {
+                               kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, opc, index, vcpu,
+                                       read_c0_entryhi());
+                               kvm_mips_dump_guest_tlbs(vcpu);
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
                        inst = *(opc);
                }
                local_irq_restore(flags);
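
With the last hunk, a failure from kvm_mips_handle_mapped_seg_tlb_fault() during an instruction fetch is no longer ignored: the fetch path logs the failing opc, TLB index and ASID, dumps the guest TLBs, restores interrupts, and returns KVM_INVALID_INST instead of dereferencing opc through a host TLB entry that was never written.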