Merge "KVM: x86: support using the vmx preemption timer for tsc deadline timer"
author     Jiang, Yunhong <yunhong.jiang@intel.com>    Thu, 21 Jul 2016 17:36:18 +0000 (17:36 +0000)
committer  Gerrit Code Review <gerrit@172.30.200.206>  Thu, 21 Jul 2016 17:36:18 +0000 (17:36 +0000)
kernel/arch/x86/include/asm/kvm_host.h
kernel/arch/x86/kvm/lapic.c
kernel/arch/x86/kvm/lapic.h
kernel/arch/x86/kvm/trace.h
kernel/arch/x86/kvm/x86.c

index 30cfd64..21637c9 100644
--- a/kernel/arch/x86/include/asm/kvm_host.h
+++ b/kernel/arch/x86/include/asm/kvm_host.h
@@ -911,6 +911,9 @@ struct kvm_x86_ops {
        void (*post_block)(struct kvm_vcpu *vcpu);
        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
+
+       int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+       void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
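
The VMX implementation behind these two new hooks lands in a companion patch that is not part of this merge. As a rough sketch of what it looks like (the hv_deadline_tsc field and the cpu_preemption_timer_multi rate shift are state introduced by that patch, not shown here, and TSC scaling is left out for brevity):

/*
 * Sketch of the VMX side (not part of this merge). The preemption
 * timer counts down a 32-bit value at a rate of TSC >>
 * cpu_preemption_timer_multi (reported in MSR_IA32_VMX_MISC[4:0]),
 * so deadlines that do not fit must be rejected; the lapic code
 * then falls back to the hrtimer.
 */
static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 tscl = rdtsc();
        u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
        u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;

        if (delta_tsc >> (cpu_preemption_timer_multi + 32))
                return -ERANGE;

        vmx->hv_deadline_tsc = tscl + delta_tsc;
        vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
                      PIN_BASED_VMX_PREEMPTION_TIMER);
        return 0;
}

static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
        /* -1 marks the timer as unarmed. */
        to_vmx(vcpu)->hv_deadline_tsc = -1;
        vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
                        PIN_BASED_VMX_PREEMPTION_TIMER);
}
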
index 1e1e7eb..701f912 100644
--- a/kernel/arch/x86/kvm/lapic.c
+++ b/kernel/arch/x86/kvm/lapic.c
@@ -1288,6 +1288,68 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
        local_irq_restore(flags);
 }
 
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
+
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+       WARN_ON(swait_active(&vcpu->wq));
+       kvm_x86_ops->cancel_hv_timer(vcpu);
+       apic->lapic_timer.hv_timer_in_use = false;
+       apic_timer_expired(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       WARN_ON(apic->lapic_timer.hv_timer_in_use);
+
+       if (apic_lvtt_tscdeadline(apic) &&
+           !atomic_read(&apic->lapic_timer.pending)) {
+               u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+               if (!kvm_x86_ops->set_hv_timer(vcpu, tscdeadline)) {
+                       apic->lapic_timer.hv_timer_in_use = true;
+                       hrtimer_cancel(&apic->lapic_timer.timer);
+
+                       /* In case the sw timer triggered in the window */
+                       if (atomic_read(&apic->lapic_timer.pending)) {
+                               apic->lapic_timer.hv_timer_in_use = false;
+                               kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+                       }
+               }
+               trace_kvm_hv_timer_state(vcpu->vcpu_id,
+                               apic->lapic_timer.hv_timer_in_use);
+       }
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
+
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       /* Possibly the TSC deadline timer is not enabled yet */
+       if (!apic->lapic_timer.hv_timer_in_use)
+               return;
+
+       kvm_x86_ops->cancel_hv_timer(vcpu);
+       apic->lapic_timer.hv_timer_in_use = false;
+
+       if (atomic_read(&apic->lapic_timer.pending))
+               return;
+
+       start_sw_tscdeadline(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
+
 static void start_apic_timer(struct kvm_lapic *apic)
 {
        ktime_t now;
@@ -1334,7 +1396,16 @@ static void start_apic_timer(struct kvm_lapic *apic)
                           ktime_to_ns(ktime_add_ns(now,
                                        apic->lapic_timer.period)));
        } else if (apic_lvtt_tscdeadline(apic)) {
-               start_sw_tscdeadline(apic);
+               /* lapic timer in tsc deadline mode */
+               u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+               if (kvm_x86_ops->set_hv_timer &&
+                   !kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
+                       apic->lapic_timer.hv_timer_in_use = true;
+                       trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
+                                       apic->lapic_timer.hv_timer_in_use);
+               } else
+                       start_sw_tscdeadline(apic);
        }
 }
 
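On a preemption-timer VM exit, the companion VMX patch feeds the event back into kvm_lapic_expired_hv_timer() above; the exit handler is minimal (again sketched here, since it is not part of this merge):

static int handle_preemption_timer(struct kvm_vcpu *vcpu)
{
        /* Inject the pending lapic timer and resume the guest. */
        kvm_lapic_expired_hv_timer(vcpu);
        return 1;
}
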
index fde8e35..640ad27 100644
--- a/kernel/arch/x86/kvm/lapic.h
+++ b/kernel/arch/x86/kvm/lapic.h
@@ -16,6 +16,7 @@ struct kvm_timer {
        u64 tscdeadline;
        u64 expired_tscdeadline;
        atomic_t pending;                       /* accumulated triggered timers */
+       bool hv_timer_in_use;
 };
 
 struct kvm_lapic {
@@ -170,4 +171,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu);
 
 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu);
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
 #endif
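
The preemption timer only counts while the vCPU is running in guest mode, so a vCPU that is about to block has to move its deadline back onto the hrtimer, and can move it back to hardware on wakeup. In the companion VMX patch the pre_block/post_block hooks pair the two switch helpers declared above, roughly as follows (pi_pre_block()/pi_post_block() stand for the existing posted-interrupt blocking logic):

static int vmx_pre_block(struct kvm_vcpu *vcpu)
{
        if (pi_pre_block(vcpu))
                return 1;

        /* The hardware timer stops outside guest mode; hand over to sw. */
        if (kvm_lapic_hv_timer_in_use(vcpu))
                kvm_lapic_switch_to_sw_timer(vcpu);

        return 0;
}

static void vmx_post_block(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->set_hv_timer)
                kvm_lapic_switch_to_hv_timer(vcpu);

        pi_post_block(vcpu);
}
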
index ab9ae67..b41f7a0 100644
--- a/kernel/arch/x86/kvm/trace.h
+++ b/kernel/arch/x86/kvm/trace.h
@@ -1025,6 +1025,21 @@ TRACE_EVENT(kvm_pi_irte_update,
                  __entry->pi_desc_addr)
 );
 
+TRACE_EVENT(kvm_hv_timer_state,
+               TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
+               TP_ARGS(vcpu_id, hv_timer_in_use),
+               TP_STRUCT__entry(
+                       __field(unsigned int, vcpu_id)
+                       __field(unsigned int, hv_timer_in_use)
+                       ),
+               TP_fast_assign(
+                       __entry->vcpu_id = vcpu_id;
+                       __entry->hv_timer_in_use = hv_timer_in_use;
+                       ),
+               TP_printk("vcpu_id %x hv_timer %x",
+                       __entry->vcpu_id,
+                       __entry->hv_timer_in_use)
+);
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
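
Once merged, the new event is available under events/kvm/kvm_hv_timer_state in tracefs and records every transition between the hardware and software timer, which makes it easy to check whether a guest's TSC deadline timer actually stays on the preemption timer.
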
index 27419ba..e3e1a8c 100644
--- a/kernel/arch/x86/kvm/x86.c
+++ b/kernel/arch/x86/kvm/x86.c
@@ -2718,6 +2718,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                                rdtsc() - vcpu->arch.last_host_tsc;
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
+
+               if (kvm_lapic_hv_timer_in_use(vcpu) &&
+                               kvm_x86_ops->set_hv_timer(vcpu,
+                                       kvm_get_lapic_tscdeadline_msr(vcpu)))
+                       kvm_lapic_switch_to_sw_timer(vcpu);
                if (check_tsc_unstable()) {
                        u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
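
The vcpu_load hunk above covers vCPU migration: after moving to another physical CPU the deadline has to be re-programmed against that CPU's view of the TSC, and if set_hv_timer() refuses (for instance because the remaining delta no longer fits the preemption timer, as in the overflow check sketched earlier), the vCPU simply falls back to the software timer.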