#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"

struct SPRSyncState {
    CPUState *cs;
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}

static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, &s);
}

static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}

static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}

static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    MemoryHotplugState *hpms = &spapr->hotplug_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= hpms->base)
        && ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
        return true;
    }

    return false;
}

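/*
 * H_ENTER: insert a new HPTE into the guest's hash page table, either at
 * the exact PTE index requested (H_EXACT) or into the first free slot of
 * the addressed PTEG.
 */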
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift, spshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
    if (!apshift || apshift > 34) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

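/* Common backend for H_REMOVE and H_BULK_REMOVE: invalidate one HPTE and
 * hand the old PTE words back through @vp and @rp. */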
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

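/* H_BULK_REMOVE takes up to four translation specifiers, each a (TSH, TSL)
 * pair in the argument buffer; the masks below decode and update the high
 * word of each pair. */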
#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST      0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE     0x8000000000000000ULL
#define H_BULK_REMOVE_END          0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS      0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND    0x1000000000000000ULL
#define H_BULK_REMOVE_PARM         0x2000000000000000ULL
#define H_BULK_REMOVE_HW           0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE     0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND      0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN         0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4

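/* H_BULK_REMOVE: process each request in the batch, writing the response
 * code and the reference/change bits back into the high word of the pair. */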
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

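/* H_PROTECT: update the protection, key and no-execute bits of an existing
 * HPTE, invalidating it around the update and flushing the old translation. */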
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

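/* H_READ: return the contents of one HPTE, or of four consecutive HPTEs
 * when H_READ_4 is specified. */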
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

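/* H_PAGE_INIT: zero or copy one page of guest memory, optionally
 * synchronizing the instruction cache afterwards. */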
static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);           /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

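/* Registration helpers for the per-CPU Virtual Processor Area, SLB shadow
 * buffer and dispatch trace log, all reached through H_REGISTER_VPA below. */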
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

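/* H_CEDE: re-enable external interrupts and, if the vCPU has no pending
 * work, halt it until an interrupt arrives. */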
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

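/* KVMPPC_H_RTAS: private hypercall used behind the guest's RTAS entry point;
 * unpacks the argument buffer and dispatches to spapr_rtas_call(). */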
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

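/* H_LOGICAL_CI_LOAD / H_LOGICAL_CACHE_LOAD and the matching store hcalls:
 * width-selectable accesses to a guest logical (real) address. */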
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

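/* KVMPPC_H_LOGICAL_MEMOP: copy or invert a run of elements between two
 * logical addresses, walking backwards when the ranges overlap. */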
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

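/* H_SET_MODE_RESOURCE_LE: switch interrupt endianness by updating LPCR[ILE]
 * on every CPU and flipping the VGA framebuffer endianness to match. */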
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

/*
 * Return the offset to the requested option vector @vector in the
 * option vector table @table.
 */
static target_ulong cas_get_option_vector(int vector, target_ulong table)
{
    int i;
    char nr_vectors, nr_entries;

    if (!table) {
        return 0;
    }

    nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
    if (!vector || vector > nr_vectors) {
        return 0;
    }
    table++; /* skip nr option vectors */

    for (i = 0; i < vector - 1; i++) {
        nr_entries = ldl_phys(&address_space_memory, table) >> 24;
        table += nr_entries + 2;
    }
    return table;
}

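/* State passed to do_set_compat() when a new compatibility PVR is applied
 * to each vCPU from the CAS hypercall below. */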
typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    Error *err;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    ppc_set_compat(s->cpu, s->cpu_version, &s->err);
}

#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

#define OV5_DRCONF_MEMORY 0x20

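/* ibm,client-architecture-support: negotiate a compatibility PVR from the
 * guest's PVR list, apply it to all vCPUs, then parse option vector 5 and
 * compose the CAS response device tree fragment. */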
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table, ov5;
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true, memory_update = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    char ov5_byte2;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                             ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                              (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .err = NULL,
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5 = cas_get_option_vector(5, ov_table);
    if (!ov5) {
        return H_SUCCESS;
    }

    /* @ov5 now points to option vector 5 */
    ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
    if (ov5_byte2 & OV5_DRCONF_MEMORY) {
        memory_update = true;
    }

    if (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                     cpu_update, memory_update)) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}

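/* Hypercall dispatch tables: one slot per PAPR opcode (opcodes are multiples
 * of 4), plus a separate table for the QEMU/KVM-PPC private hcall range. */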
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)