/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
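/* Offset of the checkpointed (TM) copy of GPR 'reg' in the vcpu struct */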
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/* hwthread_req may have been set by cede or no vcpu, so clear it */
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f			/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */
	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */

	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0			/* Don't wake on privileged (OS) doorbell */

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif

	bl	kvmhv_commence_exit
	b	kvmhv_switch_to_host
/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)
	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	/* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)

	/* Order load of vcpu after load of vcore */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that happens.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)
	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * We jump to power7_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop.  The value we
	 * put in r3 becomes the return value for power7_nap.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/*
	 * Here the primary thread is trying to return the core to
	 * whole-core mode, so we need to nap.
	 */
	/*
	 * Ensure that the secondary doesn't nap while it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)

	/* clear any pending message */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	/* Check the do_nap flag again after setting napped[] */
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi	r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
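	/*
	 * The PECE mask above is loaded pre-shifted right by 4 (presumably
	 * so that it fits in li's signed 16-bit immediate); the rlwimi
	 * then rotates it back left by 4 as it inserts the bits into the
	 * LPCR image in r4.
	 */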
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRs = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	addi	r9, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
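	/*
	 * VCORE_ENTRY_EXIT holds the map of threads that have entered in
	 * its low byte and the map of threads that are exiting in the
	 * byte above it, so any value >= 0x100 means some thread has
	 * already started to exit.
	 */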
	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
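	/*
	 * Worked example of the indexing above: for cpu 70, bit number
	 * = 70 & 63 = 6 and doubleword = 70 >> 6 = 1, so we test bit 6
	 * of the second 64-bit word of the need_tlb_flush bitmap.
	 */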
23:	ldarx	r7,0,r6			/* if set, clear the bit */

	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
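	/*
	 * TBU40 writes only the upper 40 bits of the timebase, leaving
	 * the low 24 bits unchanged.  If the low 24 bits wrapped between
	 * the two mftb reads, adding 0x100 << 16 = 1 << 24 carries one
	 * unit into the upper 40 bits to compensate.
	 */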
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest
	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)
	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Set partition DABR */
	/* Do this before re-enabling the PMU to avoid the P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)
	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */

	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3, 512			/* 1 microsecond */
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
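	/*
	 * This rldicl/rotldi pair clears just MSR_HV: the first rotate
	 * brings the HV bit to the top, where the rldicl mask (mb=1)
	 * drops it, and the second rotate puts the remaining bits back
	 * in their original positions.
	 */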
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
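	/*
	 * The rldicl above isolated the EXTERNAL_LEVEL pending bit at
	 * bit 0; the rldimi deposits that single bit at the MER position
	 * of the LPCR image, so the guest sees a mediated external
	 * interrupt as soon as it enables MSR_EE.
	 */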
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER

12:	mtspr	SPRN_SRR0, r10
	bl	kvmppc_msr_interrupt

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
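	/*
	 * By convention the first-level handlers pass the trap vector in
	 * with bit 1 set when the interrupt was delivered via HSRR0/1
	 * rather than SRR0/1; that is the bit tested here.
	 */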
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)
	stw	r12,VCPU_TRAP(r9)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif
	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)
	/* these are volatile across C function calls */
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* External interrupt? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */

	/* Check if any CPU is heading out to the host; if so, head out too */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif

	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	stw	r6,VCPU_CTRL(r9)
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)
	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)

	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	mtspr	SPRN_MMCRS, r0
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)
	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h
	.endr
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time
#endif

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI (or ISegI) for the guest */
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8		/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3		/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
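	/*
	 * hcall numbers are multiples of 4, so bit (hcall / 4) of
	 * kvm->arch.enabled_hcalls[] is the enable bit.  For example,
	 * H_ENTER (0x08) gives bit = 2, doubleword = 2 >> 6 = 0 and
	 * shift = 2 & 0x3f = 2, i.e. bit 2 of the first doubleword.
	 */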
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9			/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

sc_1_fast_return:
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER
_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5
_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * off to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */
	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */

	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif
	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* load up FP state */

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)

	b	kvmppc_cede_reentry	/* if not, go back to guest */
	/* the case where we cede when already prodded */
kvm_cede_prodded:
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
	 * (non-fatal) errors, just go back to guest execution with the
	 * current HSRR0 instead of exiting the guest.  This new approach
	 * injects a machine check into the guest for fatal errors, causing
	 * the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups inside the
	 * guest and made it difficult to recover the guest instance.
	 *
	 * If we receive a machine check with MSR(RI=0), deliver it to
	 * the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	cmpdi	r3, 0			/* Did we handle the MCE? */
	bne	2f			/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
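	/*
	 * The wake reason is in SRR1[42:45] (IBM bit numbering), i.e.
	 * bits 21:18 of the low 32-bit word, so rotating the low word
	 * left by 45 - 31 = 14 brings the field down to bit 0 before
	 * masking.
	 */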
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
BEGIN_FTR_SECTION
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	/* see if it's a host IPI */
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* if not, return -1 */
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8, returns value in r3.
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	/*
	 * Save XIRR for later.  Since we get it in reverse endian on LE
	 * systems, save it byte reversed and fetch it back in host endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
#endif
	rlwinm.	r3, r3, 0, 0xffffff
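	/*
	 * The low 24 bits of XIRR are the XISR, the pending interrupt
	 * source (0 if none); the top byte is the CPPR, the current
	 * processor priority, which is why only 0xffffff is kept here.
	 */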
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/* It's not an IPI and it's for the host.  We saved a copy of XIRR in
	 * the PACA earlier; it will be picked up by the host ICP driver
	 */

43:	/* We raced with the host, we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */
/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
/*
 * Load up FP, VMX and VSX registers
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	std	r0, PPC_LR_STKOFF(r1)

	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG

	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f		/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1 and r13 are going to be foobar. */

	/* All GPRs are volatile at this point. */

	/* Temporarily store r13 and r9 so we have some regs to play with */
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)
	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)
	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)
	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	addi	r3, r9, VCPU_FPRS_TM
	addi	r3, r9, VCPU_VRS_TM
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * machine.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1 and r13 are going to be foobar. */

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)
	/* TM checkpointed state is now set up.  All GPRs are now volatile. */

	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */

	ld	r0, PPC_LR_STKOFF(r1)
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *	r11 has the guest MSR value (in/out)
 *	r9 has a vcpu pointer (in)
 *	r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
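	/*
	 * MSR[TS] is a 2-bit field: 0b00 = non-transactional, 0b01 =
	 * suspended, 0b10 = transactional.  If the guest was in
	 * transactional state when the interrupt is delivered, the
	 * rldimi below rewrites TS in the new MSR to suspended.
	 */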
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
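/*
 * The MMCR0 value assembled below enables the performance monitor
 * exception (PMXE), freezes the counters when the enabled condition
 * or event occurs (FCECE), enables overflow conditions from PMC2-6
 * (PMCjCE) and makes PMC5/6 count regardless of the run latch
 * (C56RUN), so an overflowing counter leaves the desired interrupt
 * pending.
 */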
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r7, VCPU_ACTIVITY_START(r4)
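	/*
	 * The sequence count below appears to be used seqlock-style:
	 * it is bumped before and after the accumulation struct is
	 * updated, so a reader that sees an odd or changing count knows
	 * to retry its reads.
	 */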
	ld	r8, TAS_SEQCOUNT(r5)
	std	r8, TAS_SEQCOUNT(r5)
	ld	r7, TAS_TOTAL(r5)
	std	r7, TAS_TOTAL(r5)
3:	std	r3, TAS_MIN(r5)
	std	r8, TAS_SEQCOUNT(r5)