X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=qemu%2Ftarget-i386%2Fcpu.h;h=732eb6d7ec79d0eff30bb4286794fd1a256787ab;hb=437fd90c0250dee670290f9b714253671a990160;hp=ead28325bd677aabda83b9981f90434c96796c63;hpb=e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb;p=kvmfornfv.git

diff --git a/qemu/target-i386/cpu.h b/qemu/target-i386/cpu.h
index ead28325b..732eb6d7e 100644
--- a/qemu/target-i386/cpu.h
+++ b/qemu/target-i386/cpu.h
@@ -19,8 +19,8 @@
 #ifndef CPU_I386_H
 #define CPU_I386_H
 
-#include "config.h"
 #include "qemu-common.h"
+#include "standard-headers/asm-x86/hyperv.h"
 
 #ifdef TARGET_X86_64
 #define TARGET_LONG_BITS 64
@@ -36,10 +36,10 @@
 #define TARGET_HAS_PRECISE_SMC
 
 #ifdef TARGET_X86_64
-#define ELF_MACHINE EM_X86_64
+#define I386_ELF_MACHINE EM_X86_64
 #define ELF_MACHINE_UNAME "x86_64"
 #else
-#define ELF_MACHINE EM_386
+#define I386_ELF_MACHINE EM_386
 #define ELF_MACHINE_UNAME "i686"
 #endif
 
@@ -154,6 +154,9 @@
 #define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
 #define HF_SMAP_SHIFT 23 /* CR4.SMAP */
+#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
+#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
+#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
 
 #define HF_CPL_MASK (3 << HF_CPL_SHIFT)
 #define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
@@ -177,6 +180,9 @@
 #define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
 #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
+#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
+#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
+#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
 
 /* hflags2 */
 
@@ -185,12 +191,14 @@
 #define HF2_NMI_SHIFT 2 /* CPU serving NMI */
 #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
+#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
 
 #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
 #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
+#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
 
 #define CR0_PE_SHIFT 0
 #define CR0_MP_SHIFT 1
@@ -224,6 +232,7 @@
 #define CR4_OSXSAVE_MASK (1U << 18)
 #define CR4_SMEP_MASK (1U << 20)
 #define CR4_SMAP_MASK (1U << 21)
+#define CR4_PKE_MASK (1U << 22)
 
 #define DR6_BD (1 << 13)
 #define DR6_BS (1 << 14)
@@ -234,6 +243,7 @@
 #define DR7_TYPE_SHIFT 16
 #define DR7_LEN_SHIFT 18
 #define DR7_FIXED_1 0x00000400
+#define DR7_GLOBAL_BP_MASK 0xaa
 #define DR7_LOCAL_BP_MASK 0x55
 #define DR7_MAX_BP 4
 #define DR7_TYPE_BP_INST 0x0
@@ -251,6 +261,7 @@
 #define PG_PSE_BIT 7
 #define PG_GLOBAL_BIT 8
 #define PG_PSE_PAT_BIT 12
+#define PG_PKRU_BIT 59
 #define PG_NX_BIT 63
 
 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
@@ -266,7 +277,8 @@
 #define PG_ADDRESS_MASK 0x000ffffffffff000LL
 #define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
 #define PG_HI_USER_MASK 0x7ff0000000000000LL
-#define PG_NX_MASK (1LL << PG_NX_BIT)
+#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
+#define PG_NX_MASK (1ULL << PG_NX_BIT)
 
 #define PG_ERROR_W_BIT 1
 
@@ -275,6 +287,7 @@
 #define PG_ERROR_U_MASK 0x04
 #define PG_ERROR_RSVD_MASK 0x08
 #define PG_ERROR_I_D_MASK 0x10
+#define PG_ERROR_PK_MASK 0x20
 
 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
 #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
@@ -282,6 +295,8 @@
 #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
 #define MCE_BANKS_DEF 10
 
+#define MCG_CAP_BANKS_MASK 0xff
+
 #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
 #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
@@ -393,21 +408,32 @@
 #define MSR_IA32_BNDCFGS 0x00000d90
 #define MSR_IA32_XSS 0x00000da0
 
-#define XSTATE_FP (1ULL << 0)
-#define XSTATE_SSE (1ULL << 1)
-#define XSTATE_YMM (1ULL << 2)
-#define XSTATE_BNDREGS (1ULL << 3)
-#define XSTATE_BNDCSR (1ULL << 4)
-#define XSTATE_OPMASK (1ULL << 5)
-#define XSTATE_ZMM_Hi256 (1ULL << 6)
-#define XSTATE_Hi16_ZMM (1ULL << 7)
-
+#define XSTATE_FP_BIT 0
+#define XSTATE_SSE_BIT 1
+#define XSTATE_YMM_BIT 2
+#define XSTATE_BNDREGS_BIT 3
+#define XSTATE_BNDCSR_BIT 4
+#define XSTATE_OPMASK_BIT 5
+#define XSTATE_ZMM_Hi256_BIT 6
+#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PKRU_BIT 9
+
+#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
+#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
+#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
+#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
+#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
+#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
+#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
+#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
 
 /* CPUID feature words */
 typedef enum FeatureWord {
     FEAT_1_EDX, /* CPUID[1].EDX */
     FEAT_1_ECX, /* CPUID[1].ECX */
     FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
+    FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
     FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
     FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
     FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
@@ -572,10 +598,16 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
 #define CPUID_7_0_EBX_RDSEED (1U << 18)
 #define CPUID_7_0_EBX_ADX (1U << 19)
 #define CPUID_7_0_EBX_SMAP (1U << 20)
+#define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */
+#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
+#define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */
 #define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
 #define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
 #define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
 
+#define CPUID_7_0_ECX_PKU (1U << 3)
+#define CPUID_7_0_ECX_OSPKE (1U << 4)
+
 #define CPUID_XSAVE_XSAVEOPT (1U << 0)
 #define CPUID_XSAVE_XSAVEC (1U << 1)
 #define CPUID_XSAVE_XGETBV1 (1U << 2)
@@ -716,22 +748,18 @@ typedef struct SegmentCache {
     uint32_t flags;
 } SegmentCache;
 
-typedef union {
-    uint8_t _b[64];
-    uint16_t _w[32];
-    uint32_t _l[16];
-    uint64_t _q[8];
-    float32 _s[16];
-    float64 _d[8];
-} XMMReg; /* really zmm */
+#define MMREG_UNION(n, bits)        \
+    union n {                       \
+        uint8_t  _b_##n[(bits)/8];  \
+        uint16_t _w_##n[(bits)/16]; \
+        uint32_t _l_##n[(bits)/32]; \
+        uint64_t _q_##n[(bits)/64]; \
+        float32  _s_##n[(bits)/32]; \
+        float64  _d_##n[(bits)/64]; \
+    }
 
-typedef union {
-    uint8_t _b[8];
-    uint16_t _w[4];
-    uint32_t _l[2];
-    float32 _s[2];
-    uint64_t q;
-} MMXReg;
+typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
+typedef MMREG_UNION(MMXReg, 64) MMXReg;
 
 typedef struct BNDReg {
     uint64_t lb;
@@ -743,32 +771,36 @@ typedef struct BNDCSReg {
     uint64_t sts;
 } BNDCSReg;
 
+#define BNDCFG_ENABLE 1ULL
+#define BNDCFG_BNDPRESERVE 2ULL
+#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
+
 #ifdef HOST_WORDS_BIGENDIAN
-#define XMM_B(n) _b[63 - (n)]
-#define XMM_W(n) _w[31 - (n)]
-#define XMM_L(n) _l[15 - (n)]
-#define XMM_S(n) _s[15 - (n)]
-#define XMM_Q(n) _q[7 - (n)]
-#define XMM_D(n) _d[7 - (n)]
-
-#define MMX_B(n) _b[7 - (n)]
-#define MMX_W(n) _w[3 - (n)]
-#define MMX_L(n) _l[1 - (n)]
-#define MMX_S(n) _s[1 - (n)]
+#define ZMM_B(n) _b_ZMMReg[63 - (n)]
+#define ZMM_W(n) _w_ZMMReg[31 - (n)]
+#define ZMM_L(n) _l_ZMMReg[15 - (n)]
+#define ZMM_S(n) _s_ZMMReg[15 - (n)]
+#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
+#define ZMM_D(n) _d_ZMMReg[7 - (n)]
+
+#define MMX_B(n) _b_MMXReg[7 - (n)]
+#define MMX_W(n) _w_MMXReg[3 - (n)]
+#define MMX_L(n) _l_MMXReg[1 - (n)]
+#define MMX_S(n) _s_MMXReg[1 - (n)]
 #else
-#define XMM_B(n) _b[n]
-#define XMM_W(n) _w[n]
-#define XMM_L(n) _l[n]
-#define XMM_S(n) _s[n]
-#define XMM_Q(n) _q[n]
-#define XMM_D(n) _d[n]
-
-#define MMX_B(n) _b[n]
-#define MMX_W(n) _w[n]
-#define MMX_L(n) _l[n]
-#define MMX_S(n) _s[n]
+#define ZMM_B(n) _b_ZMMReg[n]
+#define ZMM_W(n) _w_ZMMReg[n]
+#define ZMM_L(n) _l_ZMMReg[n]
+#define ZMM_S(n) _s_ZMMReg[n]
+#define ZMM_Q(n) _q_ZMMReg[n]
+#define ZMM_D(n) _d_ZMMReg[n]
+
+#define MMX_B(n) _b_MMXReg[n]
+#define MMX_W(n) _w_MMXReg[n]
+#define MMX_L(n) _l_MMXReg[n]
+#define MMX_S(n) _s_MMXReg[n]
 #endif
-#define MMX_Q(n) q
+#define MMX_Q(n) _q_MMXReg[n]
 
 typedef union {
     floatx80 d __attribute__((aligned(16)));
@@ -793,6 +825,7 @@ typedef struct {
 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
 
 #define NB_MMU_MODES 3
+#define TARGET_INSN_START_EXTRA_WORDS 1
 
 #define NB_OPMASK_REGS 8
 
@@ -832,6 +865,7 @@ typedef struct CPUX86State {
     BNDReg bnd_regs[4];
     BNDCSReg bndcs_regs;
     uint64_t msr_bndcfgs;
+    uint64_t efer;
 
     /* Beginning of state preserved by INIT (dummy marker). */
     struct {} start_init_save;
@@ -854,8 +888,8 @@ typedef struct CPUX86State {
     float_status mmx_status; /* for 3DNow! float ops */
     float_status sse_status;
    uint32_t mxcsr;
-    XMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
-    XMMReg xmm_t0;
+    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
+    ZMMReg xmm_t0;
     MMXReg mmx_t0;
 
     uint64_t opmask_regs[NB_OPMASK_REGS];
@@ -864,7 +898,6 @@ typedef struct CPUX86State {
     uint32_t sysenter_cs;
     target_ulong sysenter_esp;
     target_ulong sysenter_eip;
-    uint64_t efer;
     uint64_t star;
 
     uint64_t vm_hsave;
@@ -908,12 +941,21 @@ typedef struct CPUX86State {
     uint64_t msr_hv_guest_os_id;
     uint64_t msr_hv_vapic;
     uint64_t msr_hv_tsc;
+    uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
+    uint64_t msr_hv_runtime;
+    uint64_t msr_hv_synic_control;
+    uint64_t msr_hv_synic_version;
+    uint64_t msr_hv_synic_evt_page;
+    uint64_t msr_hv_synic_msg_page;
+    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
+    uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT];
+    uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT];
 
     /* exception/interrupt handling */
     int error_code;
     int exception_is_int;
     target_ulong exception_next_eip;
-    target_ulong dr[8]; /* debug registers */
+    target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
     union {
         struct CPUBreakpoint *cpu_breakpoint[4];
         struct CPUWatchpoint *cpu_watchpoint[4];
@@ -963,6 +1005,7 @@ typedef struct CPUX86State {
     uint32_t sipi_vector;
     bool tsc_valid;
     int64_t tsc_khz;
+    int64_t user_tsc_khz; /* for sanity check only */
     void *kvm_xsave_buf;
 
     uint64_t mcg_cap;
@@ -980,6 +1023,8 @@ typedef struct CPUX86State {
     uint64_t xcr0;
     uint64_t xss;
 
+    uint32_t pkru;
+
     TPRAccess tpr_access_type;
 } CPUX86State;
 
@@ -1098,7 +1143,14 @@ void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
 
 int cpu_x86_signal_handler(int host_signum, void *pinfo, void *puc);
 
-/* cpuid.c */
+/* cpu.c */
+typedef struct ExtSaveArea {
+    uint32_t feature, bits;
+    uint32_t offset, size;
+} ExtSaveArea;
+
+extern const ExtSaveArea x86_ext_save_areas[];
+
 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                    uint32_t *eax, uint32_t *ebx,
                    uint32_t *ecx, uint32_t *edx);
@@ -1123,42 +1175,13 @@ void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
 #endif
 
-static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
-{
-    return (dr7 >> (index * 2)) & 1;
-}
-
-static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
-{
-    return (dr7 >> (index * 2)) & 2;
-
-}
-static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
-{
-    return hw_global_breakpoint_enabled(dr7, index) ||
-           hw_local_breakpoint_enabled(dr7, index);
-}
-
-static inline int hw_breakpoint_type(unsigned long dr7, int index)
-{
-    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
-}
-
-static inline int hw_breakpoint_len(unsigned long dr7, int index)
-{
-    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
-    return (len == 2) ? 8 : len + 1;
-}
-
-void hw_breakpoint_insert(CPUX86State *env, int index);
-void hw_breakpoint_remove(CPUX86State *env, int index);
-bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
 void breakpoint_handler(CPUState *cs);
 
 /* will be suppressed */
 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
 
 /* hw/pc.c */
 uint64_t cpu_get_tsc(CPUX86State *env);
@@ -1187,7 +1210,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))
 
 #define cpu_exec cpu_x86_exec
-#define cpu_gen_code cpu_x86_gen_code
 #define cpu_signal_handler cpu_x86_signal_handler
 #define cpu_list x86_cpu_list
 #define cpudef_setup x86_cpudef_setup
@@ -1199,7 +1221,7 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define MMU_KSMAP_IDX 0
 #define MMU_USER_IDX 1
 #define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
 {
     return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
         (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
@@ -1235,7 +1257,7 @@ static inline target_long lshift(target_long x, int n)
 #define ST1 ST(1)
 
 /* translate.c */
-void optimize_flags_init(void);
+void tcg_x86_init(void);
 
 #include "exec/cpu-all.h"
 #include "svm.h"
@@ -1267,8 +1289,12 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
 
 /* excp_helper.c */
 void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
+void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
+                                      uintptr_t retaddr);
 void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
                                        int error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
+                                          int error_code, uintptr_t retaddr);
 void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
                                    int error_code, int next_eip_addend);
 
@@ -1318,6 +1344,9 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
 void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
 void cpu_set_fpuc(CPUX86State *env, uint16_t val);
 
+/* mem_helper.c */
+void helper_lock_init(void);
+
 /* svm_helper.c */
 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                    uint64_t param);
@@ -1332,12 +1361,18 @@
 void cpu_smm_update(X86CPU *cpu);
 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
 
-void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
-                                 uint32_t feat_add, uint32_t feat_remove);
-
-void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features);
-void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features);
+/* Change the value of a KVM-specific default
+ *
+ * If value is NULL, no default will be set and the original
+ * value from the CPU model table will be kept.
+ *
+ * It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+void x86_cpu_change_kvm_default(const char *prop, const char *value);
+/* mpx_helper.c */
+void cpu_sync_bndcs_hflags(CPUX86State *env);
 
 /* Return name of 32-bit register, from a R_* constant */
 const char *get_register_name_32(unsigned int reg);
 
@@ -1347,4 +1382,7 @@ void enable_compat_apic_id_mode(void);
 #define APIC_DEFAULT_ADDRESS 0xfee00000
 #define APIC_SPACE_SIZE 0x100000
 
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+                                   fprintf_function cpu_fprintf, int flags);
+
 #endif /* CPU_I386_H */
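
Editor's note, not part of the patch: below is a minimal, self-contained C sketch of the MMREG_UNION pattern this diff introduces in place of the old anonymous XMMReg/MMXReg unions. Only the macro and the little-endian ZMM_Q/MMX_Q accessors mirror the patch; the float32/float64 typedefs and the main() harness are illustration-only assumptions (in QEMU these types come from the softfloat headers).

/* Illustration only -- not QEMU build code.  The per-type field suffix
 * (_q_ZMMReg vs _q_MMXReg) means a ZMM accessor macro cannot be applied to
 * an MMX register by mistake: the wrong combination fails to compile. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef float  float32;   /* assumption: stand-ins for the softfloat types */
typedef double float64;

#define MMREG_UNION(n, bits)        \
    union n {                       \
        uint8_t  _b_##n[(bits)/8];  \
        uint16_t _w_##n[(bits)/16]; \
        uint32_t _l_##n[(bits)/32]; \
        uint64_t _q_##n[(bits)/64]; \
        float32  _s_##n[(bits)/32]; \
        float64  _d_##n[(bits)/64]; \
    }

typedef MMREG_UNION(ZMMReg, 512) ZMMReg;   /* 64-byte vector register */
typedef MMREG_UNION(MMXReg, 64)  MMXReg;   /* 8-byte MMX register */

/* Little-endian accessors, as in the #else branch of the patch. */
#define ZMM_Q(n) _q_ZMMReg[n]
#define MMX_Q(n) _q_MMXReg[n]

int main(void)
{
    ZMMReg zmm = { { 0 } };
    MMXReg mmx = { { 0 } };

    zmm.ZMM_Q(0) = 0x1122334455667788ULL;   /* lowest qword of the 512-bit reg */
    mmx.MMX_Q(0) = 0xdeadbeefULL;

    printf("sizeof(ZMMReg)=%zu sizeof(MMXReg)=%zu\n",
           sizeof(ZMMReg), sizeof(MMXReg));             /* prints 64 and 8 */
    printf("zmm low qword = 0x%016" PRIx64 "\n", zmm.ZMM_Q(0));
    printf("mmx qword     = 0x%016" PRIx64 "\n", mmx.MMX_Q(0));
    return 0;
}

Compared with the old untagged unions that shared field names, giving each union's members a type-specific name turns a ZMM-versus-MMX accessor mix-up into a compile-time error rather than silent aliasing, which is presumably the motivation for the rename in this patch.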