These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git] kernel/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3b2d2c5..463af88 100644
@@ -128,6 +128,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        subf    r4, r4, r3
        mtspr   SPRN_DEC, r4
 
+       /* hwthread_req may have got set by cede or no vcpu, so clear it */
+       li      r0, 0
+       stb     r0, HSTATE_HWTHREAD_REQ(r13)
+
        /*
         * For external and machine check interrupts, we need
         * to call the Linux handler to process the interrupt.
@@ -146,6 +150,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        cmpwi   cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        beq     11f
+       cmpwi   r12, BOOK3S_INTERRUPT_H_DOORBELL
+       beq     15f     /* Invoke the H_DOORBELL handler */
        cmpwi   cr2, r12, BOOK3S_INTERRUPT_HMI
        beq     cr2, 14f                        /* HMI check */
 
@@ -170,6 +176,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        mtspr   SPRN_HSRR1, r7
        b       hmi_exception_after_realmode
 
+15:    mtspr   SPRN_HSRR0, r8
+       mtspr   SPRN_HSRR1, r7
+       ba      0xe80
+
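
With the two additions above, a hypervisor doorbell that interrupted the guest is reflected to the host's doorbell handler: HSRR0/HSRR1 are reloaded from the saved state and "ba 0xe80" enters the host vector directly. A toy C model of the added dispatch case (plain C, not kernel code; the only facts taken from the patch are the doorbell trap/vector 0xe80 and the new branch, the helper is made up):

    #include <assert.h>

    #define BOOK3S_INTERRUPT_EXTERNAL   0x500
    #define BOOK3S_INTERRUPT_H_DOORBELL 0xe80

    /* Returns the host vector this exit is forwarded to, 0 = stays in KVM */
    static unsigned int host_vector(unsigned int trap)
    {
            switch (trap) {
            case BOOK3S_INTERRUPT_EXTERNAL:   return 0x500;
            case BOOK3S_INTERRUPT_H_DOORBELL: return 0xe80;  /* new case above */
            default:                          return 0;
            }
    }

    int main(void)
    {
            assert(host_vector(BOOK3S_INTERRUPT_H_DOORBELL) == 0xe80);
            return 0;
    }
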
 kvmppc_primary_no_guest:
        /* We handle this much like a ceded vcpu */
        /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -215,7 +225,6 @@ kvm_novcpu_wakeup:
        ld      r5, HSTATE_KVM_VCORE(r13)
        li      r0, 0
        stb     r0, HSTATE_NAPPING(r13)
-       stb     r0, HSTATE_HWTHREAD_REQ(r13)
 
        /* check the wake reason */
        bl      kvmppc_check_wake_reason
@@ -315,33 +324,54 @@ kvm_start_guest:
        cmpdi   r3, 0
        bge     kvm_no_guest
 
-       /* get vcpu pointer, NULL if we have no vcpu to run */
-       ld      r4,HSTATE_KVM_VCPU(r13)
-       cmpdi   r4,0
-       /* if we have no vcpu to run, go back to sleep */
+       /* get vcore pointer, NULL if we have nothing to run */
+       ld      r5,HSTATE_KVM_VCORE(r13)
+       cmpdi   r5,0
+       /* if we have no vcore to run, go back to sleep */
        beq     kvm_no_guest
 
 kvm_secondary_got_guest:
 
        /* Set HSTATE_DSCR(r13) to something sensible */
-       ld      r6, PACA_DSCR(r13)
+       ld      r6, PACA_DSCR_DEFAULT(r13)
        std     r6, HSTATE_DSCR(r13)
 
-       /* Order load of vcore, ptid etc. after load of vcpu */
+       /* On thread 0 of a subcore, set HDEC to max */
+       lbz     r4, HSTATE_PTID(r13)
+       cmpwi   r4, 0
+       bne     63f
+       lis     r6, 0x7fff
+       ori     r6, r6, 0xffff
+       mtspr   SPRN_HDEC, r6
+       /* and set per-LPAR registers, if doing dynamic micro-threading */
+       ld      r6, HSTATE_SPLIT_MODE(r13)
+       cmpdi   r6, 0
+       beq     63f
+       ld      r0, KVM_SPLIT_RPR(r6)
+       mtspr   SPRN_RPR, r0
+       ld      r0, KVM_SPLIT_PMMAR(r6)
+       mtspr   SPRN_PMMAR, r0
+       ld      r0, KVM_SPLIT_LDBAR(r6)
+       mtspr   SPRN_LDBAR, r0
+       isync
+63:
+       /* Order load of vcpu after load of vcore */
        lwsync
+       ld      r4, HSTATE_KVM_VCPU(r13)
        bl      kvmppc_hv_entry
 
        /* Back from the guest, go back to nap */
-       /* Clear our vcpu pointer so we don't come back in early */
+       /* Clear our vcpu and vcore pointers so we don't come back in early */
        li      r0, 0
+       std     r0, HSTATE_KVM_VCPU(r13)
        /*
-        * Once we clear HSTATE_KVM_VCPU(r13), the code in
+        * Once we clear HSTATE_KVM_VCORE(r13), the code in
         * kvmppc_run_core() is going to assume that all our vcpu
         * state is visible in memory.  This lwsync makes sure
         * that that is true.
         */
        lwsync
-       std     r0, HSTATE_KVM_VCPU(r13)
+       std     r0, HSTATE_KVM_VCORE(r13)
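
The secondary now clears its vcpu pointer first and only then, after an lwsync, clears the vcore pointer that the code in kvmppc_run_core() watches. A compact C11 sketch of that handshake (my simplification; release/acquire stands in for lwsync, and the polling side is paraphrased from the comment above rather than copied from the C source):

    #include <stdatomic.h>
    #include <stddef.h>

    struct hstate_sketch {
            _Atomic(void *) kvm_vcpu;
            _Atomic(void *) kvm_vcore;
            /* ...vcpu state saved by the exit path lives elsewhere... */
    };

    /* Secondary thread, mirroring the asm above */
    static void secondary_done(struct hstate_sketch *hs)
    {
            atomic_store_explicit(&hs->kvm_vcpu, NULL, memory_order_relaxed);
            /* lwsync: everything stored before this becomes visible before
             * kvm_vcore is seen as NULL */
            atomic_store_explicit(&hs->kvm_vcore, NULL, memory_order_release);
    }

    /* Primary thread: once kvm_vcore reads NULL, the secondary's vcpu
     * state may be read safely */
    static int secondary_finished(struct hstate_sketch *hs)
    {
            return atomic_load_explicit(&hs->kvm_vcore,
                                        memory_order_acquire) == NULL;
    }

    int main(void)
    {
            static struct hstate_sketch hs;     /* zero-initialised */
            secondary_done(&hs);
            return !secondary_finished(&hs);
    }
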
 
 /*
  * At this point we have finished executing in the guest.
@@ -374,16 +404,71 @@ kvm_no_guest:
        b       power7_wakeup_loss
 
 53:    HMT_LOW
-       ld      r4, HSTATE_KVM_VCPU(r13)
-       cmpdi   r4, 0
+       ld      r5, HSTATE_KVM_VCORE(r13)
+       cmpdi   r5, 0
+       bne     60f
+       ld      r3, HSTATE_SPLIT_MODE(r13)
+       cmpdi   r3, 0
+       beq     kvm_no_guest
+       lbz     r0, KVM_SPLIT_DO_NAP(r3)
+       cmpwi   r0, 0
        beq     kvm_no_guest
        HMT_MEDIUM
+       b       kvm_unsplit_nap
+60:    HMT_MEDIUM
        b       kvm_secondary_got_guest
 
 54:    li      r0, KVM_HWTHREAD_IN_KVM
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
        b       kvm_no_guest
 
+/*
+ * Here the primary thread is trying to return the core to
+ * whole-core mode, so we need to nap.
+ */
+kvm_unsplit_nap:
+       /*
+        * Ensure that secondary doesn't nap when it has
+        * its vcore pointer set.
+        */
+       sync            /* matches smp_mb() before setting split_info.do_nap */
+       ld      r0, HSTATE_KVM_VCORE(r13)
+       cmpdi   r0, 0
+       bne     kvm_no_guest
+       /* clear any pending message */
+BEGIN_FTR_SECTION
+       lis     r6, (PPC_DBELL_SERVER << (63-36))@h
+       PPC_MSGCLR(6)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+       /* Set kvm_split_mode.napped[tid] = 1 */
+       ld      r3, HSTATE_SPLIT_MODE(r13)
+       li      r0, 1
+       lhz     r4, PACAPACAINDEX(r13)
+       clrldi  r4, r4, 61      /* micro-threading => P8 => 8 threads/core */
+       addi    r4, r4, KVM_SPLIT_NAPPED
+       stbx    r0, r3, r4
+       /* Check the do_nap flag again after setting napped[] */
+       sync
+       lbz     r0, KVM_SPLIT_DO_NAP(r3)
+       cmpwi   r0, 0
+       beq     57f
+       li      r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
+       mfspr   r4, SPRN_LPCR
+       rlwimi  r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
+       mtspr   SPRN_LPCR, r4
+       isync
+       std     r0, HSTATE_SCRATCH0(r13)
+       ptesync
+       ld      r0, HSTATE_SCRATCH0(r13)
+1:     cmpd    r0, r0
+       bne     1b
+       nap
+       b       .
+
+57:    li      r0, 0
+       stbx    r0, r3, r4
+       b       kvm_no_guest
+
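
kvm_unsplit_nap works against the split-mode control block that the KVM_SPLIT_* offsets point at. A hedged C sketch of that structure and the napping handshake (field names inferred from the offsets used above and from struct kvm_split_mode in kvm_book3s_asm.h; exact types and layout are my guess):

    #define MAX_SMT_THREADS 8                 /* POWER8: 8 threads per core */

    struct kvm_split_mode_sketch {
            unsigned long rpr;                /* KVM_SPLIT_RPR    */
            unsigned long pmmar;              /* KVM_SPLIT_PMMAR  */
            unsigned long ldbar;              /* KVM_SPLIT_LDBAR  */
            unsigned char do_nap;             /* KVM_SPLIT_DO_NAP */
            unsigned char napped[MAX_SMT_THREADS];  /* KVM_SPLIT_NAPPED */
    };

    /* What the asm above does, in C: advertise that this thread is going
     * to nap, then re-check do_nap so a racing clear of the flag cannot
     * be missed (the two sync instructions order the store/load pairs).
     * tid = smp_processor_id() & 7, cf. "clrldi r4, r4, 61" above. */
    static int should_really_nap(struct kvm_split_mode_sketch *sip, int tid)
    {
            sip->napped[tid] = 1;
            __sync_synchronize();             /* stands in for "sync" */
            if (!sip->do_nap) {
                    sip->napped[tid] = 0;     /* label 57: above */
                    return 0;
            }
            return 1;                         /* go to nap */
    }

    int main(void)
    {
            static struct kvm_split_mode_sketch sip;
            sip.do_nap = 1;
            return should_really_nap(&sip, 0) ? 0 : 1;
    }
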
 /******************************************************************************
  *                                                                            *
  *                               Entry code                                   *
@@ -854,7 +939,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        cmpwi   r0, 0
        bne     21f
        HMT_LOW
-20:    lbz     r0, VCORE_IN_GUEST(r5)
+20:    lwz     r3, VCORE_ENTRY_EXIT(r5)
+       cmpwi   r3, 0x100
+       bge     no_switch_exit
+       lbz     r0, VCORE_IN_GUEST(r5)
        cmpwi   r0, 0
        beq     20b
        HMT_MEDIUM
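
This wait loop (a secondary waiting for the primary to switch the MMU into the guest) now bails out to no_switch_exit if the core has already begun exiting, and the loop in kvmhv_switch_to_host further down compares the two halves of the same word. As I read the 4.4 code, VCORE_ENTRY_EXIT packs a bitmap of threads that have entered the guest in the low byte and a bitmap of threads that have started to exit in the next byte, so a value of 0x100 or more means some thread is already on the way out. A small standalone C check of that encoding (the layout is an inference from the 0x100 test and the rlwinm/clrldi pair):

    #include <assert.h>
    #include <stdint.h>

    static unsigned int entry_map(uint32_t ee) { return ee & 0xff; }
    static unsigned int exit_map(uint32_t ee)  { return (ee >> 8) & 0xff; }

    static int exit_in_progress(uint32_t ee)   { return ee >= 0x100; }
    static int all_threads_out(uint32_t ee)
    {
            return entry_map(ee) == exit_map(ee);
    }

    int main(void)
    {
            uint32_t ee = 0x0303;       /* threads 0 and 1 entered and exited */
            assert(exit_in_progress(ee));
            assert(all_threads_out(ee));
            assert(!exit_in_progress(0x03));   /* nobody has exited yet */
            return 0;
    }
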
@@ -870,7 +958,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        blt     hdec_soon
 
        ld      r6, VCPU_CTR(r4)
-       lwz     r7, VCPU_XER(r4)
+       ld      r7, VCPU_XER(r4)
 
        mtctr   r6
        mtxer   r7
@@ -985,9 +1073,13 @@ secondary_too_late:
 #endif
 11:    b       kvmhv_switch_to_host
 
+no_switch_exit:
+       HMT_MEDIUM
+       li      r12, 0
+       b       12f
 hdec_soon:
        li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-       stw     r12, VCPU_TRAP(r4)
+12:    stw     r12, VCPU_TRAP(r4)
        mr      r9, r4
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMEXIT
@@ -1103,7 +1195,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        mfctr   r3
        mfxer   r4
        std     r3, VCPU_CTR(r9)
-       stw     r4, VCPU_XER(r9)
+       std     r4, VCPU_XER(r9)
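
The XER save here (and the matching loads elsewhere in this file) now uses std/ld because the vcpu's XER image is a full 64-bit field; the old stw/lwz pair silently dropped the upper word. A trivial standalone illustration (not kernel code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t xer = 0x0000000120000000ull;  /* a bit above bit 31 set */
            uint32_t via_stw = (uint32_t)xer;      /* old 32-bit save */
            uint64_t via_std = xer;                /* new 64-bit save */

            assert(via_std == xer);
            assert(via_stw != xer);                /* upper word lost */
            return 0;
    }
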
 
        /* If this is a page table miss then see if it's theirs or ours */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
@@ -1171,12 +1263,18 @@ mc_cont:
        bl      kvmhv_accumulate_time
 #endif
 
+       mr      r3, r12
        /* Increment exit count, poke other threads to exit */
        bl      kvmhv_commence_exit
        nop
        ld      r9, HSTATE_KVM_VCPU(r13)
        lwz     r12, VCPU_TRAP(r9)
 
+       /* Stop others sending VCPU interrupts to this physical CPU */
+       li      r0, -1
+       stw     r0, VCPU_CPU(r9)
+       stw     r0, VCPU_THREAD_CPU(r9)
+
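
Storing -1 into VCPU_CPU and VCPU_THREAD_CPU tells the kick path that this vcpu no longer has a physical CPU to poke, so no further IPIs or msgsnds are aimed at a thread that is leaving the guest. A hedged sketch of the consumer side (modelled loosely on kvmppc_fast_vcpu_kick_hv(); the struct and helper names here are hypothetical):

    struct vcpu_sketch {
            int cpu;                  /* VCPU_CPU */
            int thread_cpu;           /* VCPU_THREAD_CPU */
    };

    /* hypothetical stand-ins for msgsnd/IPI and the normal wake-up path */
    static void poke_physical_cpu(int cpu)                 { (void)cpu; }
    static void wake_vcpu_normally(struct vcpu_sketch *v)  { (void)v; }

    static void kick_vcpu(struct vcpu_sketch *v)
    {
            int cpu = v->thread_cpu >= 0 ? v->thread_cpu : v->cpu;

            if (cpu >= 0) {
                    poke_physical_cpu(cpu);    /* doorbell or IPI */
                    return;
            }
            /* cpu == -1, as set by the exit path above: fall back */
            wake_vcpu_normally(v);
    }

    int main(void)
    {
            struct vcpu_sketch v = { -1, -1 };
            kick_vcpu(&v);
            return 0;
    }
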
        /* Save guest CTRL register, set runlatch to 1 */
        mfspr   r6,SPRN_CTRLF
        stw     r6,VCPU_CTRL(r9)
@@ -1272,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        std     r6, VCPU_ACOP(r9)
        stw     r7, VCPU_GUEST_PID(r9)
        std     r8, VCPU_WORT(r9)
+       /*
+        * Restore various registers to 0, where non-zero values
+        * set by the guest could disrupt the host.
+        */
+       li      r0, 0
+       mtspr   SPRN_IAMR, r0
+       mtspr   SPRN_CIABR, r0
+       mtspr   SPRN_DAWRX, r0
+       mtspr   SPRN_TCSCR, r0
+       mtspr   SPRN_WORT, r0
+       /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+       li      r0, 1
+       sldi    r0, r0, 31
+       mtspr   SPRN_MMCRS, r0
 8:
 
        /* Save and reset AMR and UAMOR before turning on the MMU */
@@ -1541,12 +1653,17 @@ kvmhv_switch_to_host:
 
        /* Primary thread waits for all the secondaries to exit guest */
 15:    lwz     r3,VCORE_ENTRY_EXIT(r5)
-       srwi    r0,r3,8
+       rlwinm  r0,r3,32-8,0xff
        clrldi  r3,r3,56
        cmpw    r3,r0
        bne     15b
        isync
 
+       /* Did we actually switch to the guest at all? */
+       lbz     r6, VCORE_IN_GUEST(r5)
+       cmpwi   r6, 0
+       beq     19f
+
        /* Primary thread switches back to host partition */
        ld      r6,KVM_HOST_SDR1(r4)
        lwz     r7,KVM_HOST_LPID(r4)
@@ -1590,7 +1707,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 18:
        /* Signal secondary CPUs to continue */
        stb     r0,VCORE_IN_GUEST(r5)
-       lis     r8,0x7fff               /* MAX_INT@h */
+19:    lis     r8,0x7fff               /* MAX_INT@h */
        mtspr   SPRN_HDEC,r8
 
 16:    ld      r8,KVM_HOST_LPCR(r4)
@@ -1646,7 +1763,8 @@ kvmppc_hdsi:
        beq     3f
        clrrdi  r0, r4, 28
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
-       bne     1f                      /* if no SLB entry found */
+       li      r0, BOOK3S_INTERRUPT_DATA_SEGMENT
+       bne     7f                      /* if no SLB entry found */
 4:     std     r4, VCPU_FAULT_DAR(r9)
        stw     r6, VCPU_FAULT_DSISR(r9)
 
@@ -1665,18 +1783,19 @@ kvmppc_hdsi:
        cmpdi   r3, -2                  /* MMIO emulation; need instr word */
        beq     2f
 
-       /* Synthesize a DSI for the guest */
+       /* Synthesize a DSI (or DSegI) for the guest */
        ld      r4, VCPU_FAULT_DAR(r9)
        mr      r6, r3
-1:     mtspr   SPRN_DAR, r4
+1:     li      r0, BOOK3S_INTERRUPT_DATA_STORAGE
        mtspr   SPRN_DSISR, r6
+7:     mtspr   SPRN_DAR, r4
        mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
-       li      r10, BOOK3S_INTERRUPT_DATA_STORAGE
+       mr      r10, r0
        bl      kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:     ld      r7, VCPU_CTR(r9)
-       lwz     r8, VCPU_XER(r9)
+       ld      r8, VCPU_XER(r9)
        mtctr   r7
        mtxer   r8
        mr      r4, r9
@@ -1720,7 +1839,8 @@ kvmppc_hisi:
        beq     3f
        clrrdi  r0, r10, 28
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
-       bne     1f                      /* if no SLB entry found */
+       li      r0, BOOK3S_INTERRUPT_INST_SEGMENT
+       bne     7f                      /* if no SLB entry found */
 4:
        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
@@ -1737,11 +1857,12 @@ kvmppc_hisi:
        cmpdi   r3, -1                  /* handle in kernel mode */
        beq     guest_exit_cont
 
-       /* Synthesize an ISI for the guest */
+       /* Synthesize an ISI (or ISegI) for the guest */
        mr      r11, r3
-1:     mtspr   SPRN_SRR0, r10
+1:     li      r0, BOOK3S_INTERRUPT_INST_STORAGE
+7:     mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
-       li      r10, BOOK3S_INTERRUPT_INST_STORAGE
+       mr      r10, r0
        bl      kvmppc_msr_interrupt
        b       fast_interrupt_c_return
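
In both kvmppc_hdsi and kvmppc_hisi the SLB lookup result now selects which interrupt is synthesized for the guest: a storage interrupt when a segment mapping exists, a segment interrupt when slbfee. finds nothing. The control flow restated in C, as I read the two hunks (vector numbers are the standard Book3S ones from kvm_asm.h):

    #include <assert.h>

    #define BOOK3S_INTERRUPT_DATA_STORAGE  0x300
    #define BOOK3S_INTERRUPT_DATA_SEGMENT  0x380
    #define BOOK3S_INTERRUPT_INST_STORAGE  0x400
    #define BOOK3S_INTERRUPT_INST_SEGMENT  0x480

    /* which interrupt to reflect for a data-side fault */
    static unsigned int dsi_or_dsegi(int slb_entry_found)
    {
            return slb_entry_found ? BOOK3S_INTERRUPT_DATA_STORAGE
                                   : BOOK3S_INTERRUPT_DATA_SEGMENT;
    }

    /* ...and for an instruction-side fault */
    static unsigned int isi_or_isegi(int slb_entry_found)
    {
            return slb_entry_found ? BOOK3S_INTERRUPT_INST_STORAGE
                                   : BOOK3S_INTERRUPT_INST_SEGMENT;
    }

    int main(void)
    {
            assert(dsi_or_dsegi(0) == BOOK3S_INTERRUPT_DATA_SEGMENT);
            assert(isi_or_isegi(1) == BOOK3S_INTERRUPT_INST_STORAGE);
            return 0;
    }
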
 
@@ -1817,8 +1938,8 @@ hcall_real_table:
        .long   DOTSYM(kvmppc_h_remove) - hcall_real_table
        .long   DOTSYM(kvmppc_h_enter) - hcall_real_table
        .long   DOTSYM(kvmppc_h_read) - hcall_real_table
-       .long   0               /* 0x10 - H_CLEAR_MOD */
-       .long   0               /* 0x14 - H_CLEAR_REF */
+       .long   DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
+       .long   DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
        .long   DOTSYM(kvmppc_h_protect) - hcall_real_table
        .long   DOTSYM(kvmppc_h_get_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_h_put_tce) - hcall_real_table
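
H_CLEAR_MOD and H_CLEAR_REF now have real-mode handlers instead of falling back to the host. Entries in hcall_real_table are 4 bytes and hcall numbers step by 4, so the hcall number doubles as the byte offset into the table, with a zero entry still meaning "exit to the host". A toy C model of that lookup (dispatch details simplified; only the table contents shown above come from the patch, the handler bodies are stubs):

    #include <stddef.h>
    #include <stdio.h>

    typedef long (*hcall_fn)(void);

    /* stand-ins for the real-mode handlers named in the table */
    static long h_remove(void)    { return 0; }
    static long h_enter(void)     { return 0; }
    static long h_read(void)      { return 0; }
    static long h_clear_mod(void) { return 0; }
    static long h_clear_ref(void) { return 0; }

    /* index = hcall number / 4; NULL = not handled in real mode */
    static const hcall_fn hcall_table_sketch[] = {
            [0x04 / 4] = h_remove,
            [0x08 / 4] = h_enter,
            [0x0c / 4] = h_read,
            [0x10 / 4] = h_clear_mod,    /* was 0 before this patch */
            [0x14 / 4] = h_clear_ref,    /* was 0 before this patch */
            /* ...remaining entries elided... */
    };

    static hcall_fn lookup(unsigned int hcall)
    {
            unsigned int n = sizeof(hcall_table_sketch) /
                             sizeof(hcall_table_sketch[0]);
            unsigned int idx = hcall / 4;

            return idx < n ? hcall_table_sketch[idx] : NULL;
    }

    int main(void)
    {
            printf("H_CLEAR_MOD in real mode: %s\n",
                   lookup(0x10) ? "yes" : "no");
            return 0;
    }
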
@@ -2046,7 +2167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
        /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
 2:     rlwimi  r5, r4, 5, DAWRX_DR | DAWRX_DW
-       rlwimi  r5, r4, 1, DAWRX_WT
+       rlwimi  r5, r4, 2, DAWRX_WT
        clrrdi  r4, r4, 3
        std     r4, VCPU_DAWR(r3)
        std     r5, VCPU_DAWRX(r3)
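
The one-line change above fixes the rotate amount used to move DABR_TRANSLATION into the DAWRX_WT position when emulating H_SET_DABR for compat-mode guests; with a rotate of 1 the bit landed on DAWRX_WTI instead. A standalone C check of the bit arithmetic (bit positions copied from arch/powerpc/include/asm/reg.h; treat them as an assumption of this sketch):

    #include <assert.h>
    #include <stdint.h>

    #define DABR_DATA_READ    (1u << 0)
    #define DABR_DATA_WRITE   (1u << 1)
    #define DABR_TRANSLATION  (1u << 2)

    #define DAWRX_WTI         (1u << 3)
    #define DAWRX_WT          (1u << 4)
    #define DAWRX_DR          (1u << 5)
    #define DAWRX_DW          (1u << 6)

    /* C model of "rlwimi rt, rs, sh, mask": rotate left, insert under mask */
    static uint32_t rlwimi(uint32_t rt, uint32_t rs, int sh, uint32_t mask)
    {
            uint32_t rot = (rs << sh) | (rs >> (32 - sh));
            return (rt & ~mask) | (rot & mask);
    }

    int main(void)
    {
            uint32_t dabr = DABR_TRANSLATION | DABR_DATA_WRITE | DABR_DATA_READ;
            uint32_t dawrx = 0;

            dawrx = rlwimi(dawrx, dabr, 5, DAWRX_DR | DAWRX_DW);
            dawrx = rlwimi(dawrx, dabr, 2, DAWRX_WT);       /* fixed shift */

            assert(dawrx == (DAWRX_WT | DAWRX_DR | DAWRX_DW));
            assert(rlwimi(0, dabr, 1, DAWRX_WT) == 0);      /* old shift missed WT */
            return 0;
    }
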
@@ -2280,7 +2401,6 @@ machine_check_realmode:
        mr      r3, r9          /* get vcpu pointer */
        bl      kvmppc_realmode_machine_check
        nop
-       cmpdi   r3, 0           /* Did we handle MCE ? */
        ld      r9, HSTATE_KVM_VCPU(r13)
        li      r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        /*
@@ -2293,13 +2413,18 @@ machine_check_realmode:
         * The old code used to return to host for unhandled errors which
         * was causing guest to hang with soft lockups inside guest and
         * makes it difficult to recover guest instance.
+        *
+        * if we receive machine check with MSR(RI=0) then deliver it to
+        * guest as machine check causing guest to crash.
         */
-       ld      r10, VCPU_PC(r9)
        ld      r11, VCPU_MSR(r9)
+       andi.   r10, r11, MSR_RI        /* check for unrecoverable exception */
+       beq     1f                      /* Deliver a machine check to guest */
+       ld      r10, VCPU_PC(r9)
+       cmpdi   r3, 0           /* Did we handle MCE ? */
        bne     2f      /* Continue guest execution. */
        /* If not, deliver a machine check.  SRR0/1 are already set */
-       li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-       ld      r11, VCPU_MSR(r9)
+1:     li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
        bl      kvmppc_msr_interrupt
 2:     b       fast_interrupt_c_return
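
The reordered machine-check path now looks at MSR_RI first: if the guest was in an unrecoverable context, the event is delivered to the guest as a machine check regardless of whether kvmppc_realmode_machine_check handled it. The decision restated in C (a sketch of the branch structure only):

    #include <stdbool.h>

    enum mc_action { RESUME_GUEST, DELIVER_MC_TO_GUEST };

    /* msr_ri:  guest MSR[RI] at the time of the machine check
     * handled: kvmppc_realmode_machine_check() returned non-zero */
    static enum mc_action machine_check_action(bool msr_ri, bool handled)
    {
            if (!msr_ri)
                    return DELIVER_MC_TO_GUEST;  /* "beq 1f" path above */
            if (handled)
                    return RESUME_GUEST;         /* "bne 2f" path above */
            return DELIVER_MC_TO_GUEST;          /* SRR0/1 already set */
    }

    int main(void)
    {
            return machine_check_action(true, true) != RESUME_GUEST;
    }
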
 
@@ -2339,14 +2464,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
        /* hypervisor doorbell */
 3:     li      r12, BOOK3S_INTERRUPT_H_DOORBELL
+
+       /*
+        * Clear the doorbell as we will invoke the handler
+        * explicitly in the guest exit path.
+        */
+       lis     r6, (PPC_DBELL_SERVER << (63-36))@h
+       PPC_MSGCLR(6)
        /* see if it's a host IPI */
        li      r3, 1
        lbz     r0, HSTATE_HOST_IPI(r13)
        cmpwi   r0, 0
        bnelr
-       /* if not, clear it and return -1 */
-       lis     r6, (PPC_DBELL_SERVER << (63-36))@h
-       PPC_MSGCLR(6)
+       /* if not, return -1 */
        li      r3, -1
        blr
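
Moving the PPC_MSGCLR before the host-IPI test means the doorbell message is always consumed here, which is safe now that the guest-exit path re-enters the host doorbell handler explicitly (the 15:/"ba 0xe80" hunk earlier in this file). The resulting logic for this branch of the wake-reason check, in C (the 1/-1 values mirror the r3 values set above; the helper name is hypothetical):

    #include <stdbool.h>

    static void msgclr_doorbell(void) { /* stands in for PPC_MSGCLR(6) */ }

    /* Doorbell branch of kvmppc_check_wake_reason after this change:
     * trap <- BOOK3S_INTERRUPT_H_DOORBELL, message always cleared,
     * return 1 if a host IPI is pending, else -1. */
    static int wake_reason_doorbell(bool host_ipi, unsigned int *trap)
    {
            *trap = 0xe80;            /* BOOK3S_INTERRUPT_H_DOORBELL */
            msgclr_doorbell();        /* now done unconditionally */
            return host_ipi ? 1 : -1;
    }

    int main(void)
    {
            unsigned int trap;
            return wake_reason_doorbell(false, &trap) == -1 ? 0 : 1;
    }
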