Upgrade to 4.4.50-rt62
[kvmfornfv.git] / kernel / arch / x86 / kvm / vmx.c
index a722f72..8d744a8 100644
@@ -415,6 +415,7 @@ struct nested_vmx {
        struct list_head vmcs02_pool;
        int vmcs02_num;
        u64 vmcs01_tsc_offset;
+       bool change_vmcs01_virtual_x2apic_mode;
        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;
        /*
@@ -1308,10 +1309,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
        return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
 }
 
-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+               == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
 }
 
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
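The rename from is_exception() to is_nmi() makes the predicate's name match what it tests: a valid event whose type field says NMI rather than hardware exception. A standalone sketch of the bit layout behind the check; the constants are the architectural values from the VM-exit interruption-information field, and the wrapper around them is illustrative:

#include <stdbool.h>
#include <stdint.h>

/* VM-exit interruption information: vector in bits 7:0, event type in
 * bits 10:8, valid flag in bit 31. */
#define INTR_INFO_INTR_TYPE_MASK  0x700u
#define INTR_INFO_VALID_MASK      (1u << 31)
#define INTR_TYPE_NMI_INTR        (2u << 8)  /* type 2 = NMI */
#define INTR_TYPE_HARD_EXCEPTION  (3u << 8)  /* type 3 = hardware exception */

static bool is_nmi(uint32_t intr_info)
{
        /* True only for a valid event whose type is NMI. */
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
}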
@@ -2699,8 +2700,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
        } else
                vmx->nested.nested_vmx_ept_caps = 0;
 
+       /*
+        * Old versions of KVM use the single-context version without
+        * checking for support, so declare that it is supported even
+        * though it is treated as global context.  The alternative,
+        * failing the single-context invvpid, would be worse.
+        */
        if (enable_vpid)
                vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
+                               VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
                                VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
        else
                vmx->nested.nested_vmx_vpid_caps = 0;
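Advertising VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT means L1 now sees single-context INVVPID as supported, even though L0 implements it as a global flush (see the handle_invvpid hunk below). A sketch of how an L1 hypervisor would probe this capability; rdmsr() here stands in for whatever MSR-read primitive the guest uses:

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_VMX_EPT_VPID_CAP  0x48c

extern uint64_t rdmsr(uint32_t msr);  /* hypothetical MSR-read helper */

static bool single_context_invvpid_supported(void)
{
        /* INVVPID capability bits sit high in the MSR: bit 32 = INVVPID
         * itself, bit 40 = individual-address, bit 41 = single-context,
         * bit 42 = all-context. */
        return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & (1ull << 41);
}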
@@ -5016,8 +5024,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
        cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-       vmx_set_cr0(vcpu, cr0); /* enter rmode */
        vmx->vcpu.arch.cr0 = cr0;
+       vmx_set_cr0(vcpu, cr0); /* enter rmode */
        vmx_set_cr4(vcpu, 0);
        vmx_set_efer(vcpu, 0);
        vmx_fpu_activate(vcpu);
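The two swapped lines matter because helpers reached from vmx_set_cr0() (the paging- and mode-transition checks) read the cached vcpu->arch.cr0 rather than the argument, so the cache must be updated first. A generic sketch of the hazard; the struct and helpers are illustrative stand-ins, not KVM's:

#include <stdbool.h>

#define X86_CR0_PE  (1ul << 0)

struct vcpu { unsigned long cr0; };  /* stand-in for vcpu->arch.cr0 */

/* Stand-in for vmx_set_cr0(): mode decisions consult the cached value. */
static void set_cr0(struct vcpu *v, unsigned long new_cr0)
{
        bool cached_pe = v->cr0 & X86_CR0_PE;  /* reads the cache */

        (void)new_cr0;
        (void)cached_pe;  /* ... choose real/protected-mode path ... */
}

static void vcpu_reset(struct vcpu *v, unsigned long cr0)
{
        v->cr0 = cr0;     /* update the cache first (this patch's order) */
        set_cr0(v, cr0);  /* the callee now sees consistent state */
}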
@@ -5295,7 +5303,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
        if (is_machine_check(intr_info))
                return handle_machine_check(vcpu);
 
-       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+       if (is_nmi(intr_info))
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_no_device(intr_info)) {
@@ -6652,7 +6660,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
        /* Checks for #GP/#SS exceptions. */
        exn = false;
-       if (is_protmode(vcpu)) {
+       if (is_long_mode(vcpu)) {
+               /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+                * non-canonical form. This is the only check on the memory
+                * destination for long mode!
+                */
+               exn = is_noncanonical_address(*ret);
+       } else if (is_protmode(vcpu)) {
                /* Protected mode: apply checks for segment validity in the
                 * following order:
                 * - segment type check (#GP(0) may be thrown)
@@ -6669,17 +6683,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
                         * execute-only code segment
                         */
                        exn = ((s.type & 0xa) == 8);
-       }
-       if (exn) {
-               kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-               return 1;
-       }
-       if (is_long_mode(vcpu)) {
-               /* Long mode: #GP(0)/#SS(0) if the memory address is in a
-                * non-canonical form. This is an only check for long mode.
-                */
-               exn = is_noncanonical_address(*ret);
-       } else if (is_protmode(vcpu)) {
+               if (exn) {
+                       kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+                       return 1;
+               }
                /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
                 */
                exn = (s.unusable != 0);
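Across these two hunks the checks are reordered so that long mode performs only the canonical-address test, and the #GP for a bad segment type is raised inside the protected-mode branch instead of being checked unconditionally. For reference, a sketch of the canonical test, following the kernel helper's 48-bit sign-extension trick:

#include <stdbool.h>
#include <stdint.h>

/* A 48-bit virtual address is canonical when bits 63:47 all equal
 * bit 47, i.e. the value survives sign-extension from 48 bits. */
static bool is_noncanonical_address(uint64_t la)
{
        return ((int64_t)(la << 16) >> 16) != (int64_t)la;
}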
@@ -7420,6 +7427,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
        if (!(types & (1UL << type))) {
                nested_vmx_failValid(vcpu,
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+               skip_emulated_instruction(vcpu);
                return 1;
        }
 
@@ -7478,6 +7486,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        if (!(types & (1UL << type))) {
                nested_vmx_failValid(vcpu,
                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+               skip_emulated_instruction(vcpu);
                return 1;
        }
 
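This hunk and the invept one above fix the same bug: on the invalid-operand path the handler raised VMfailValid but returned to the guest without advancing RIP, so the guest re-executed the failing instruction forever. Schematically, every completed path of an emulated instruction must consume it (kernel identifiers reused; the surrounding function is a sketch, not the real handler):

static int handle_emulated_insn(struct kvm_vcpu *vcpu, u32 type,
                                u32 supported_types)
{
        if (!(supported_types & (1u << type))) {
                nested_vmx_failValid(vcpu,
                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
                skip_emulated_instruction(vcpu);  /* the missing call */
                return 1;  /* resume the guest after the instruction */
        }
        /* ... perform the operation ... */
        nested_vmx_succeed(vcpu);
        skip_emulated_instruction(vcpu);
        return 1;
}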
@@ -7494,12 +7503,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        }
 
        switch (type) {
+       case VMX_VPID_EXTENT_SINGLE_CONTEXT:
+               /*
+                * Old versions of KVM use the single-context version so we
+                * have to support it; just treat it the same as all-context.
+                */
        case VMX_VPID_EXTENT_ALL_CONTEXT:
                __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
                nested_vmx_succeed(vcpu);
                break;
        default:
-               /* Trap single context invalidation invvpid calls */
+               /* Trap individual address invalidation invvpid calls */
                BUG_ON(1);
                break;
        }
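Single-context versus all-context refers to the extent type and descriptor that the (L1) hypervisor hands to INVVPID. A sketch of how the instruction is issued; the kernel's real __invvpid() is equivalent but wraps the asm in exception-fixup macros:

#include <stdint.h>

#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR  0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT   1
#define VMX_VPID_EXTENT_ALL_CONTEXT      2

/* 128-bit INVVPID descriptor: VPID in bits 15:0, plus a linear address
 * that only the individual-address type consults. */
struct invvpid_desc {
        uint64_t vpid : 16;
        uint64_t rsvd : 48;
        uint64_t gla;
};

static inline void invvpid(unsigned long type, uint16_t vpid, uint64_t gla)
{
        struct invvpid_desc desc = { vpid, 0, gla };

        asm volatile("invvpid %0, %1"
                     : : "m"(desc), "r"(type) : "cc", "memory");
}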
@@ -7795,7 +7809,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 
        switch (exit_reason) {
        case EXIT_REASON_EXCEPTION_NMI:
-               if (!is_exception(intr_info))
+               if (is_nmi(intr_info))
                        return false;
                else if (is_page_fault(intr_info))
                        return enable_ept;
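The old !is_exception() test happened to return false for NMIs, but also for soft interrupts and soft exceptions; the renamed helper states the intent directly: NMIs are never forwarded to L1. In simplified form, the dispatch for EXIT_REASON_EXCEPTION_NMI now reads (a compression of nested_vmx_exit_handled(); the #NM special case is omitted):

/* True when L1 wants the exit, false when L0 handles the event itself. */
static bool l1_wants_exception_nmi(u32 intr_info, struct vmcs12 *vmcs12)
{
        if (is_nmi(intr_info))
                return false;       /* L0 always handles NMIs */
        if (is_page_fault(intr_info))
                return enable_ept;  /* under shadow paging L0 needs #PFs */
        return vmcs12->exception_bitmap &
                (1u << (intr_info & INTR_INFO_VECTOR_MASK));
}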
@@ -8200,6 +8214,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
        if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
                        (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
                        exit_reason != EXIT_REASON_EPT_VIOLATION &&
+                       exit_reason != EXIT_REASON_PML_FULL &&
                        exit_reason != EXIT_REASON_TASK_SWITCH)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
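A PML-full exit can arrive while the CPU is midway through delivering an event into the guest, so it joins the set of exits that are legitimate in that window rather than evidence of an emulation bug. The condition above, restated as a predicate:

/* Exits that may legitimately interrupt event delivery; any other exit
 * with valid IDT-vectoring info is reported as an internal error. */
static bool vectoring_exit_is_benign(u32 exit_reason)
{
        switch (exit_reason) {
        case EXIT_REASON_EXCEPTION_NMI:
        case EXIT_REASON_EPT_VIOLATION:
        case EXIT_REASON_PML_FULL:
        case EXIT_REASON_TASK_SWITCH:
                return true;
        default:
                return false;
        }
}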
@@ -8259,6 +8274,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 {
        u32 sec_exec_control;
 
+       /* Postpone execution until vmcs01 is the current VMCS. */
+       if (is_guest_mode(vcpu)) {
+               to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+               return;
+       }
+
        /*
         * There is no point in enabling virtualize x2apic without
         * also enabling apicv
@@ -8397,8 +8418,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
                kvm_machine_check();
 
        /* We need to handle NMIs before interrupts are enabled */
-       if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-           (exit_intr_info & INTR_INFO_VALID_MASK)) {
+       if (is_nmi(exit_intr_info)) {
                kvm_before_handle_nmi(&vmx->vcpu);
                asm("int $2");
                kvm_after_handle_nmi(&vmx->vcpu);
@@ -8834,6 +8854,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
        put_cpu();
 }
 
+/*
+ * Ensure that the current vmcs of the logical processor is the
+ * vmcs01 of the vcpu before calling free_nested().
+ */
+static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int r;
+
+       r = vcpu_load(vcpu);
+       BUG_ON(r);
+       vmx_load_vmcs01(vcpu);
+       free_nested(vmx);
+       vcpu_put(vcpu);
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8842,8 +8878,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
                vmx_destroy_pml_buffer(vmx);
        free_vpid(vmx->vpid);
        leave_guest_mode(vcpu);
-       vmx_load_vmcs01(vcpu);
-       free_nested(vmx);
+       vmx_free_vcpu_nested(vcpu);
        free_loaded_vmcs(vmx->loaded_vmcs);
        kfree(vmx->guest_msrs);
        kvm_vcpu_uninit(vcpu);
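The helper is needed because free_nested() issues VMWRITEs, which act on the processor's current VMCS: that must be vmcs01, with the vcpu actually loaded on this CPU, yet vmx_free_vcpu() can be reached from a task that never ran the vcpu. The bracketing pattern, schematically (the wrapper is illustrative; the patch open-codes it):

/* Run fn with the vcpu loaded and vmcs01 as the current VMCS. */
static int with_vmcs01(struct kvm_vcpu *vcpu,
                       void (*fn)(struct vcpu_vmx *vmx))
{
        int r = vcpu_load(vcpu);   /* pin the vcpu to this task/cpu */

        if (r)
                return r;
        vmx_load_vmcs01(vcpu);     /* VMPTRLD vmcs01 if vmcs02 was current */
        fn(to_vmx(vcpu));
        vcpu_put(vcpu);
        return 0;
}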
@@ -10576,6 +10611,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
                              PIN_BASED_VMX_PREEMPTION_TIMER);
 
+       if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+               vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+               vmx_set_virtual_x2apic_mode(vcpu,
+                               vcpu->arch.apic_base & X2APIC_ENABLE);
+       }
+
        /* This is needed for the same reason it was needed in prepare_vmcs02 */
        vmx->host_rsp = 0;
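Together with the flag added to struct nested_vmx and the early return in vmx_set_virtual_x2apic_mode(), this completes a defer-and-replay pattern: while L2 runs, vmcs02 is the current VMCS, so a virtual-x2APIC update aimed at vmcs01 is recorded and applied only after nested_vmx_vmexit() has switched back. Condensed, using the patch's own identifiers:

static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
        if (is_guest_mode(vcpu)) {  /* vmcs02 current: defer the write */
                to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
                return;
        }
        /* ... vmcs01 current: safe to rewrite its secondary controls ... */
}

/* In nested_vmx_vmexit(), once vmcs01 is current again: */
        if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
                vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
                vmx_set_virtual_x2apic_mode(vcpu,
                                vcpu->arch.apic_base & X2APIC_ENABLE);
        }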