/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

#include "trace.h"
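/*
 * Partition-wide MSRs are backed by state in struct kvm_hv (one instance
 * per VM), while the remaining synthetic MSRs live in the per-vcpu
 * struct kvm_vcpu_hv; the predicate below drives that routing.
 */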
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}
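/*
 * Accessors for the HV_X64_MSR_CRASH_P0..P4 register bank, which a
 * guest fills with diagnostic data before signalling a crash through
 * HV_X64_MSR_CRASH_CTL.
 */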
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}
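/*
 * Only the host may change the stored CRASH_CTL value; a guest write
 * with the NOTIFY bit set is the crash signal itself and is forwarded
 * to user space as a KVM_REQ_HV_CRASH request.
 */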
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                           hv->hv_crash_param[0],
                           hv->hv_crash_param[1],
                           hv->hv_crash_param[2],
                           hv->hv_crash_param[3],
                           hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}
static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}
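/*
 * Handle writes to the partition-wide MSRs.  Callers must serialize
 * against other vcpus; kvm_hv_set_msr_common() below does so by taking
 * kvm->lock.
 */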
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
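        /*
         * Only a zeroed reference-TSC page is written below; guests
         * treat an all-zero page as invalid and fall back to another
         * clocksource instead of the TSC page.
         */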
        case HV_X64_MSR_REFERENCE_TSC: {
                u64 gfn;
                HV_REFERENCE_TSC_PAGE tsc_ref;

                memset(&tsc_ref, 0, sizeof(tsc_ref));
                hv->hv_tsc_page = data;
                if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                        break;
                gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
                if (kvm_write_guest(kvm,
                                    gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                                    &tsc_ref, sizeof(tsc_ref)))
                        return 1;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        cputime_t utime, stime;

        task_cputime_adjusted(current, &utime, &stime);

        return div_u64(cputime_to_nsecs(utime + stime), 100);
}
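/*
 * Handle writes to the per-vcpu synthetic MSRs.  No additional locking
 * is needed here because every field touched is local to this vcpu.
 */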
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}
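/* Read side of the partition-wide MSRs; see kvm_hv_set_msr_pw() above. */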
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset,
                               100);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                int r;
                struct kvm_vcpu *v;

                kvm_for_each_vcpu(r, v, vcpu->kvm) {
                        if (v == vcpu) {
                                data = r;
                                break;
                        }
                }
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}
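/*
 * Common entry points used by the x86 MSR code.  Accesses to the
 * partition-wide MSRs are serialized with kvm->lock so that concurrent
 * writes from different vcpus see consistent VM-wide state.
 */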
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}
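/*
 * Top-level hypercall dispatcher.  The result convention follows the
 * Hyper-V TLFS: status in the low 16 bits of the return value and the
 * count of completed rep iterations in bits 32-43.
 */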
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * Per the Hyper-V spec, a hypercall issued from non-zero CPL or
         * from real mode generates a #UD exception.
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 0;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif
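        /*
         * Decode the hypercall input value: call code in bits 0-15,
         * the "fast" flag in bit 16, rep count in bits 32-43 and the
         * rep start index in bits 48-59, matching the masks below.
         */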
        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        switch (code) {
        case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu);
                break;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

        ret = res | (((u64)rep_done & 0xfff) << 32);
        if (longmode) {
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
        } else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
        }

        return 1;
}