Upgrade to 4.4.50-rt62
diff --git a/kernel/arch/mips/kvm/emulate.c b/kernel/arch/mips/kvm/emulate.c
index 41b1b09..4c85ab8 100644
--- a/kernel/arch/mips/kvm/emulate.c
+++ b/kernel/arch/mips/kvm/emulate.c
@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
  */
 static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 {
-       ktime_t expires;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       ktime_t expires, threshold;
+       uint32_t count, compare;
        int running;
 
-       /* Is the hrtimer pending? */
+       /* Calculate the biased and scaled guest CP0_Count */
+       count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+       compare = kvm_read_c0_guest_compare(cop0);
+
+       /*
+        * Find whether CP0_Count has reached the closest timer interrupt. If
+        * not, we shouldn't inject it.
+        */
+       if ((int32_t)(count - compare) < 0)
+               return count;
+
+       /*
+        * The CP0_Count we're going to return has already reached the closest
+        * timer interrupt. Quickly check if it really is a new interrupt by
+        * looking at whether the interval until the hrtimer expiry time is
+        * less than 1/4 of the timer period.
+        */
        expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
-       if (ktime_compare(now, expires) >= 0) {
+       threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+       if (ktime_before(expires, threshold)) {
                /*
                 * Cancel it while we handle it so there's no chance of
                 * interference with the timeout handler.
@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
                }
        }
 
-       /* Return the biased and scaled guest CP0_Count */
-       return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+       return count;
 }
 
 /**
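The early-out added above leans on a modular-arithmetic idiom: the unsigned difference count - compare, reinterpreted as int32_t, is negative exactly when count sits less than half the 32-bit range behind compare, so the test stays correct across CP0_Count wraparound. A minimal standalone sketch of the idiom (the values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t compare = 0x00000010;   /* target just past wraparound */
        uint32_t count   = 0xfffffff0;   /* counter just before wraparound */

        /* A plain unsigned compare wrongly claims the target was reached. */
        printf("unsigned count >= compare: %d\n", count >= compare);     /* 1 */

        /* The signed difference is -0x20: count is still 0x20 ticks short. */
        printf("(int32_t)(count - compare) < 0: %d\n",
               (int32_t)(count - compare) < 0);                          /* 1 */
        return 0;
    }

The quarter-period threshold that follows is the second half of the fix: per the comment above, an hrtimer expiry within count_period / 4 of now means the interrupt for this period has not been handled yet and really is new, while a distant expiry means the handler already ran and re-armed for the next period.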
@@ -419,32 +437,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
        hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
 }
 
-/**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu:      Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered which do not depend on the time that
- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
- * kvm_mips_resume_hrtimer() are used directly).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
-       ktime_t now;
-       uint32_t count;
-
-       /*
-        * freeze_hrtimer takes care of a timer interrupts <= count, and
-        * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
-        */
-       now = kvm_mips_freeze_hrtimer(vcpu, &count);
-       kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
 /**
  * kvm_mips_write_count() - Modify the count and update timer.
  * @vcpu:      Virtual CPU.
@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
  * kvm_mips_write_compare() - Modify compare and update timer.
  * @vcpu:      Virtual CPU.
  * @compare:   New CP0_Compare value.
+ * @ack:       Whether to acknowledge timer interrupt.
  *
  * Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
  */
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int dc;
+       u32 old_compare = kvm_read_c0_guest_compare(cop0);
+       ktime_t now;
+       uint32_t count;
 
        /* if unchanged, must just be an ack */
-       if (kvm_read_c0_guest_compare(cop0) == compare)
+       if (old_compare == compare) {
+               if (!ack)
+                       return;
+               kvm_mips_callbacks->dequeue_timer_int(vcpu);
+               kvm_write_c0_guest_compare(cop0, compare);
                return;
+       }
+
+       /* freeze_hrtimer() takes care of timer interrupts <= count */
+       dc = kvm_mips_count_disabled(vcpu);
+       if (!dc)
+               now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+       if (ack)
+               kvm_mips_callbacks->dequeue_timer_int(vcpu);
 
-       /* Update compare */
        kvm_write_c0_guest_compare(cop0, compare);
 
-       /* Update timeout if count enabled */
-       if (!kvm_mips_count_disabled(vcpu))
-               kvm_mips_update_hrtimer(vcpu);
+       /* resume_hrtimer() takes care of timer interrupts > count */
+       if (!dc)
+               kvm_mips_resume_hrtimer(vcpu, now, count);
 }
 
 /**
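With acknowledgement folded into kvm_mips_write_compare() itself, the caller must be able to dequeue the pending interrupt between freezing and resuming the hrtimer, which is why the one-shot kvm_mips_update_hrtimer() wrapper above is dropped and the freeze/resume pair is open-coded here. The @ack flag then separates the two kinds of Compare write. A hedged sketch of the intended call patterns (the restore-side call and the restored_compare variable are assumed from the flag's description, not shown in this diff):

    /* Guest MTC0 to CP0_Compare: the write acknowledges the timer IRQ. */
    kvm_mips_write_compare(vcpu, vcpu->arch.gprs[rt], true);

    /* Userspace restoring CP0_Compare (e.g. live migration): a pending
     * timer IRQ must survive the write, so do not ack. */
    kvm_mips_write_compare(vcpu, restored_compare, false);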
@@ -741,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
 
-       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+       if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
 
-       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-               kvm_clear_c0_guest_status(cop0, ST0_ERL);
-               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
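The reorder above is a correctness fix, not a cleanup: the MIPS privileged architecture gives Status.ERL priority over Status.EXL, so ERET must return to ErrorEPC whenever ERL is set and may only fall back to EPC when ERL is clear and EXL is set. The selection logic reduces to this minimal sketch (the helper name is illustrative):

    /* Illustrative only: ERET return-address selection, ERL before EXL. */
    static unsigned long eret_target(u32 status, unsigned long epc,
                                     unsigned long errorepc)
    {
        if (status & ST0_ERL)      /* error level set: use ErrorEPC */
            return errorepc;
        if (status & ST0_EXL)      /* exception level set: use EPC */
            return epc;
        return 0;                  /* neither set: architecturally undefined */
    }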
@@ -796,6 +807,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
        return EMULATE_FAIL;
 }
 
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu:      VCPU with changed mappings.
+ * @tlb:       TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+                                         struct kvm_mips_tlb *tlb)
+{
+       int cpu, i;
+       bool user;
+
+       /* No need to flush for entries which are already invalid */
+       if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
+               return;
+       /* User address space doesn't need flushing for KSeg2/3 changes */
+       user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+       preempt_disable();
+
+       /*
+        * Probe the shadow host TLB for the entry being overwritten, if one
+        * matches, invalidate it
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       /* Invalidate the whole ASID on other CPUs */
+       cpu = smp_processor_id();
+       for_each_possible_cpu(i) {
+               if (i == cpu)
+                       continue;
+               if (user)
+                       vcpu->arch.guest_user_asid[i] = 0;
+               vcpu->arch.guest_kernel_asid[i] = 0;
+       }
+
+       preempt_enable();
+}
+
 /* Write Guest TLB Entry @ Index */
 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 {
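Zeroing the per-CPU guest ASIDs is the cross-CPU half of the invalidation: the local shadow TLB entry is probed and dropped directly, while every other CPU is left with an ASID value that can never match, forcing a fresh ASID (and therefore no stale translations) the next time this VCPU is loaded there. A hedged sketch of the consumer-side check the zeroed values are meant to trip, assuming the vcpu_load path compares the cached ASID against the live ASID version as MIPS ASID management does:

    /* Simplified consumer side: a zeroed guest ASID never matches the
     * current ASID version, so a new one is allocated on vcpu_load. */
    if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
        ASID_VERSION_MASK)
        allocate_new_guest_asid(vcpu, cpu);   /* hypothetical helper */

The raw MTC0-to-EntryHi path later in this diff applies the same per-CPU zap inline after flushing the host TLB.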
@@ -815,11 +867,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
        }
 
        tlb = &vcpu->arch.guest_tlb[index];
-       /*
-        * Probe the shadow host TLB for the entry being overwritten, if one
-        * matches, invalidate it
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -848,11 +897,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
        tlb = &vcpu->arch.guest_tlb[index];
 
-       /*
-        * Probe the shadow host TLB for the entry being overwritten, if one
-        * matches, invalidate it
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+       kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -971,6 +1016,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
        int32_t rt, rd, copz, sel, co_bit, op;
        uint32_t pc = vcpu->arch.pc;
        unsigned long curr_pc;
+       int cpu, i;
 
        /*
         * Update PC and hold onto current PC in case there is
@@ -1078,8 +1124,16 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
                                                vcpu->arch.gprs[rt]
                                                & ASID_MASK);
 
+                                       preempt_disable();
                                        /* Blow away the shadow host TLBs */
                                        kvm_mips_flush_host_tlb(1);
+                                       cpu = smp_processor_id();
+                                       for_each_possible_cpu(i)
+                                               if (i != cpu) {
+                                                       vcpu->arch.guest_user_asid[i] = 0;
+                                                       vcpu->arch.guest_kernel_asid[i] = 0;
+                                               }
+                                       preempt_enable();
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
@@ -1095,9 +1149,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 
                                /* If we are writing to COMPARE */
                                /* Clear pending timer interrupt, if any */
-                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
                                kvm_mips_write_compare(vcpu,
-                                                      vcpu->arch.gprs[rt]);
+                                                      vcpu->arch.gprs[rt],
+                                                      true);
                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                                unsigned int old_val, val, change;
 
@@ -1419,6 +1473,7 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
                                            struct kvm_vcpu *vcpu)
 {
        enum emulation_result er = EMULATE_DO_MMIO;
+       unsigned long curr_pc;
        int32_t op, base, rt, offset;
        uint32_t bytes;
 
@@ -1427,7 +1482,18 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
        offset = inst & 0xffff;
        op = (inst >> 26) & 0x3f;
 
-       vcpu->arch.pending_load_cause = cause;
+       /*
+        * Find the resume PC now while we have safe and easy access to the
+        * prior branch instruction, and save it for
+        * kvm_mips_complete_mmio_load() to restore later.
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+       vcpu->arch.io_pc = vcpu->arch.pc;
+       vcpu->arch.pc = curr_pc;
+
        vcpu->arch.io_gpr = rt;
 
        switch (op) {
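This reshuffle exists because update_pc() needs the preceding branch instruction to resolve a load sitting in a branch-delay slot, and that context is only safely readable here at emulation time, not when the MMIO completes. So the resume PC is computed eagerly, stashed in the new io_pc field (replacing pending_load_cause), and the visible PC is rewound so a failed MMIO access can re-fault cleanly. The resulting lifecycle, matching this hunk and the kvm_mips_complete_mmio_load() hunk below:

    /*
     * 1. kvm_mips_emulate_load(): compute the post-load PC (branch-delay
     *    aware) via update_pc(), save it in vcpu->arch.io_pc, then rewind
     *    vcpu->arch.pc so the access can be replayed on failure.
     * 2. Exit to userspace, which performs the actual MMIO read.
     * 3. kvm_mips_complete_mmio_load(): write the data into the saved
     *    GPR and set the PC straight to io_pc; no second update_pc() call.
     */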
@@ -1618,8 +1684,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 
        preempt_disable();
        if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
-                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+                   kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+                       kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+                               __func__, va, vcpu, read_c0_entryhi());
+                       er = EMULATE_FAIL;
+                       preempt_enable();
+                       goto done;
+               }
        } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                int index;
@@ -1654,14 +1726,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                                                                run, vcpu);
                                preempt_enable();
                                goto dont_update_pc;
-                       } else {
-                               /*
-                                * We fault an entry from the guest tlb to the
-                                * shadow host TLB
-                                */
-                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
-                                                                    NULL,
-                                                                    NULL);
+                       }
+                       /*
+                        * We fault an entry from the guest tlb to the
+                        * shadow host TLB
+                        */
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                NULL, NULL)) {
+                               kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, va, index, vcpu,
+                                       read_c0_entryhi());
+                               er = EMULATE_FAIL;
+                               preempt_enable();
+                               goto done;
                        }
                }
        } else {
@@ -2396,9 +2473,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                goto done;
        }
 
-       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-       if (er == EMULATE_FAIL)
-               return er;
+       /* Restore saved resume PC */
+       vcpu->arch.pc = vcpu->arch.io_pc;
 
        switch (run->mmio.len) {
        case 4:
@@ -2420,11 +2496,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                break;
        }
 
-       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-               kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-                         vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-                         vcpu->mmio_needed);
-
 done:
        return er;
 }
@@ -2622,8 +2693,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
                         * OK we have a Guest TLB entry, now inject it into the
                         * shadow host TLB
                         */
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
-                                                            NULL);
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                NULL, NULL)) {
+                               kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, va, index, vcpu,
+                                       read_c0_entryhi());
+                               er = EMULATE_FAIL;
+                       }
                }
        }
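The last three hunks apply one pattern uniformly: failures from kvm_mips_handle_kseg0_tlb_fault() and kvm_mips_handle_mapped_seg_tlb_fault(), previously ignored, now log a diagnostic and propagate EMULATE_FAIL instead of letting the guest run on an unfilled TLB. A condensed sketch of the pattern (the kvm_err() format string is abbreviated here):

    if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL)) {
        kvm_err("...");         /* VA, index, vcpu and ASID for debugging */
        er = EMULATE_FAIL;      /* caller unwinds; typically surfaces to
                                 * userspace as an internal error exit */
    }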