Upgrade to 4.4.50-rt62
kvmfornfv.git: kernel/arch/x86/kvm/cpuid.c
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

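/*
 * Compute the XSAVE area size needed for the state components in @xstate_bv
 * by querying host CPUID leaf 0xD: sub-leaf N reports the size of component N
 * in EAX and, for the standard (non-compacted) format, its offset in EBX.
 * For the compacted format the components are packed, so the running total is
 * used as the offset instead.
 */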
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
        int feature_bit = 0;
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

        xstate_bv &= XFEATURE_MASK_EXTEND;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;
                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
                        offset = compacted ? ret : ebx;
                        ret = max(ret, offset + eax);
                }

                xstate_bv >>= 1;
                feature_bit++;
        }

        return ret;
}

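/*
 * XCR0 bits KVM may expose to a guest: the intersection of the features KVM
 * knows how to virtualize (KVM_SUPPORTED_XCR0) and those enabled on the host,
 * minus the MPX states when the vendor backend does not support MPX.
 */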
u64 kvm_supported_xcr0(void)
{
        u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

        if (!kvm_x86_ops->mpx_supported())
                xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

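/*
 * Re-derive the vCPU state that is cached from its CPUID table: the OSXSAVE
 * bit, the LAPIC timer mode mask, the guest's supported XCR0 and XSAVE area
 * size, eager-FPU activation, and the cached MAXPHYADDR.  Called whenever
 * userspace (re)writes the CPUID entries.
 */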
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return 0;

        /* Update OSXSAVE bit */
        if (cpu_has_xsave && best->function == 0x1) {
                best->ecx &= ~F(OSXSAVE);
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= F(OSXSAVE);
        }

        if (apic) {
                if (best->ecx & F(TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
        if (!best) {
                vcpu->arch.guest_supported_xcr0 = 0;
                vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
        } else {
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) &
                        kvm_supported_xcr0();
                vcpu->arch.guest_xstate_size = best->ebx =
                        xstate_required_size(vcpu->arch.xcr0, false);
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
        if (vcpu->arch.eager_fpu)
                kvm_x86_ops->fpu_activate(vcpu);

        /*
         * KVM's canonical-address checks assume 48-bit virtual addresses;
         * reject a CPUID table that reports any other width.
         */
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best && ((best->eax & 0xff00) >> 8) != 48 &&
                ((best->eax & 0xff00) >> 8) != 0)
                return -EINVAL;

        /* Update physical-address width */
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

        kvm_pmu_refresh(vcpu);
        return 0;
}

static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

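/*
 * If the host is running without EFER.NX (NX disabled in firmware or on the
 * kernel command line), the guest cannot use NX either, so strip the NX bit
 * from the guest's 0x80000001 leaf.
 */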
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
                entry->edx &= ~F(NX);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

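/*
 * Read the guest's physical-address width from CPUID.80000008H:EAX[7:0].
 * If the extended leaves are not exposed to the guest, fall back to the
 * legacy default of 36 bits.
 */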
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

/*
 * Legacy KVM_SET_CPUID: an old userspace passes the original struct
 * kvm_cpuid_entry layout, which is converted to kvm_cpuid_entry2 here.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
out:
        return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

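/*
 * cpuid_mask() ANDs a guest feature word with the corresponding host
 * capability word so that KVM never advertises a feature the host CPU lacks.
 * do_cpuid_1_ent() fills one kvm_cpuid_entry2 with the host's raw CPUID
 * output for (function, index); callers then trim the result down.
 */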
static void cpuid_mask(u32 *word, int wordnum)
{
        *word &= boot_cpu_data.x86_capability[wordnum];
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}

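/*
 * KVM_GET_EMULATED_CPUID path: report only features that KVM emulates in
 * software even when the host CPU lacks them.  Currently that is just MOVBE
 * in leaf 1.
 */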
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
                                   u32 func, u32 index, int *nent, int maxnent)
{
        switch (func) {
        case 0:
                entry->eax = 1;         /* only one leaf currently */
                ++*nent;
                break;
        case 1:
                entry->ecx = F(MOVBE);
                ++*nent;
                break;
        default:
                break;
        }

        entry->function = func;
        entry->index = index;

        return 0;
}

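/*
 * KVM_GET_SUPPORTED_CPUID path: build the entries for one top-level CPUID
 * function.  Each entry starts as the host's raw values and is then masked
 * down to what KVM can virtualize; functions with sub-leaves (2, 4, 0xb, 0xd)
 * may emit several entries and bail out with -E2BIG once the caller-supplied
 * array is full.
 */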
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                                 u32 index, int *nent, int maxnent)
{
        int r;
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
                                ? F(GBPAGES) : 0;
        unsigned f_lm = F(LM);
#else
        unsigned f_gbpages = 0;
        unsigned f_lm = 0;
#endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
        unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
        unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;

        /* cpuid 1.edx */
        const u32 kvm_supported_word0_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
        /* cpuid 0x80000001.edx */
        const u32 kvm_supported_word1_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_supported_word4_x86_features =
                /* NOTE: MONITOR (and MWAIT) are emulated as NOP,
                 * but *not* advertised to guests via CPUID ! */
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

        /* cpuid 0xC0000001.edx */
        const u32 kvm_supported_word5_x86_features =
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN);

        /* cpuid 7.0.ebx */
        const u32 kvm_supported_word9_x86_features =
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
                F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
                F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(PCOMMIT);

        /* cpuid 0xD.1.eax */
        const u32 kvm_supported_word10_x86_features =
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();

        r = -E2BIG;

        if (*nent >= maxnent)
                goto out;

        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xd);
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
                cpuid_mask(&entry->edx, 0);
                entry->ecx &= kvm_supported_word4_x86_features;
                cpuid_mask(&entry->ecx, 4);
                /* we support x2apic emulation even if host does not support
                 * it since we emulate x2apic in software */
                entry->ecx |= F(X2APIC);
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times; ++t) {
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 has additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 6: /* Thermal management */
                entry->eax = 0x4; /* allow ARAT */
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 7: {
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* Mask ebx against host capability word 9 */
                if (index == 0) {
                        entry->ebx &= kvm_supported_word9_x86_features;
                        cpuid_mask(&entry->ebx, 9);
                        /* TSC_ADJUST is emulated */
                        entry->ebx |= F(TSC_ADJUST);
                } else
                        entry->ebx = 0;
                entry->eax = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        }
        case 9:
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                perf_get_x86_pmu_capability(&cap);

                /*
                 * Only support guest architectural pmu on a host
                 * with architectural pmu.
                 */
                if (!cap.version)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed = cap.num_counters_fixed;
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                edx.split.reserved = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
        /* function 0xb has additional index. */
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        level_type = entry[i - 1].ecx & 0xff00;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
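        /*
         * Leaf 0xD enumerates the XSAVE state components.  Sub-leaf 0 is
         * masked against kvm_supported_xcr0(), sub-leaf 1 advertises the
         * XSAVE extensions (XSAVEOPT/XSAVEC/XGETBV1/XSAVES), and further
         * sub-leaves are emitted only for components KVM actually supports.
         */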
        case 0xd: {
                int idx, i;
                u64 supported = kvm_supported_xcr0();

                entry->eax &= supported;
                entry->ebx = xstate_required_size(supported, false);
                entry->ecx = entry->ebx;
                entry->edx &= supported >> 32;
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                if (!supported)
                        break;

                for (idx = 1, i = 1; idx < 64; ++idx) {
                        u64 mask = ((u64)1 << idx);
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[i], function, idx);
                        if (idx == 1) {
                                entry[i].eax &= kvm_supported_word10_x86_features;
                                cpuid_mask(&entry[i].eax, 10);
                                entry[i].ebx = 0;
                                if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
                                        entry[i].ebx =
                                                xstate_required_size(supported,
                                                                     true);
                        } else {
                                if (entry[i].eax == 0 || !(supported & mask))
                                        continue;
                                if (WARN_ON_ONCE(entry[i].ecx & 1))
                                        continue;
                        }
                        entry[i].ecx = 0;
                        entry[i].edx = 0;
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                        ++i;
                }
                break;
        }
        case KVM_CPUID_SIGNATURE: {
                static const char signature[12] = "KVMKVMKVM\0\0";
                const u32 *sigptr = (const u32 *)signature;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
                cpuid_mask(&entry->edx, 1);
                entry->ecx &= kvm_supported_word6_x86_features;
                cpuid_mask(&entry->ecx, 6);
                break;
        case 0x80000007: /* Advanced power management */
                /* invariant TSC is CPUID.80000007H:EDX[8] */
                entry->edx &= (1 << 8);
                /* mask against host */
                entry->edx &= boot_cpu_data.x86_power;
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                if (!g_phys_as)
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->ebx = entry->edx = 0;
                break;
        }
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                break;
        case 0x8000001d:
                break;
        /* Add support for Centaur's CPUID instruction */
        case 0xC0000000:
                /* Just support up to 0xC0000004 now */
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                entry->edx &= kvm_supported_word5_x86_features;
                cpuid_mask(&entry->edx, 5);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        kvm_x86_ops->set_supported_cpuid(function, entry);

        r = 0;

out:
        put_cpu();

        return r;
}

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
                        u32 idx, int *nent, int maxnent, unsigned int type)
{
        if (type == KVM_GET_EMULATED_CPUID)
                return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

        return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

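/*
 * Top-level CPUID ranges enumerated by KVM_GET_SUPPORTED/EMULATED_CPUID.
 * When has_leaf_count is set, the first leaf of the range reports the highest
 * valid function in EAX and the whole range is walked; an optional qualifier
 * (e.g. is_centaur_cpu) suppresses a range on hosts where it does not apply.
 */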
struct kvm_cpuid_param {
        u32 func;
        u32 idx;
        bool has_leaf_count;
        bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
        return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
                                 __u32 num_entries, unsigned int ioctl_type)
{
        int i;
        __u32 pad[3];

        if (ioctl_type != KVM_GET_EMULATED_CPUID)
                return false;

        /*
         * We want to make sure that ->padding is being passed clean from
         * userspace in case we want to use it for something in the future.
         *
         * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
         * have to content ourselves with checking only the emulated side.
         * /me sheds a tear.
         */
        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
                        return true;

                if (pad[0] || pad[1] || pad[2])
                        return true;
        }
        return false;
}

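/*
 * Common handler for KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID:
 * walk the ranges in param[], build the entries in a temporary buffer, copy
 * them back to userspace and update cpuid->nent with the count produced.
 */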
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG, i;
        u32 func;
        static const struct kvm_cpuid_param param[] = {
                { .func = 0, .has_leaf_count = true },
                { .func = 0x80000000, .has_leaf_count = true },
                { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
                { .func = KVM_CPUID_SIGNATURE },
                { .func = KVM_CPUID_FEATURES },
        };

        if (cpuid->nent < 1)
                goto out;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        if (sanity_check_entries(entries, cpuid->nent, type))
                return -EINVAL;

        r = -ENOMEM;
        cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        r = 0;
        for (i = 0; i < ARRAY_SIZE(param); i++) {
                const struct kvm_cpuid_param *ent = &param[i];

                if (ent->qualifier && !ent->qualifier(ent))
                        continue;

                r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
                                &nent, cpuid->nent, type);

                if (r)
                        goto out_free;

                if (!ent->has_leaf_count)
                        continue;

                limit = cpuid_entries[nent - 1].eax;
                for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
                        r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
                                     &nent, cpuid->nent, type);

                if (r)
                        goto out_free;
        }

        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

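/*
 * For stateful functions (CPUID leaf 2 style), move the
 * KVM_CPUID_FLAG_STATE_READ_NEXT marker to the next entry with the same
 * function number, so consecutive guest CPUID executions walk the duplicated
 * entries in order.
 */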
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
        int j, nent = vcpu->arch.cpuid_nent;

        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* when no next entry is found, the current entry[i] is reselected */
        for (j = i + 1; ; j = (j + 1) % nent) {
                struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
                if (ej->function == e->function) {
                        ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                        return j;
                }
        }
        return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
        u32 function, u32 index)
{
        if (e->function != function)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
            !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
                return 0;
        return 1;
}

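/*
 * Look up the vCPU's CPUID entry for (function, index).  If the matching
 * entry is stateful, its READ_NEXT marker is advanced as a side effect.
 * Returns NULL when the guest's CPUID table has no such entry.
 */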
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        int i;
        struct kvm_cpuid_entry2 *best = NULL;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                struct kvm_cpuid_entry2 *e;

                e = &vcpu->arch.cpuid_entries[i];
                if (is_matching_cpuid_entry(e, function, index)) {
                        if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
                                move_to_next_stateful_cpuid_entry(vcpu, i);
                        best = e;
                        break;
                }
        }
        return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
                                                  u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *maxlevel;

        maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
        if (!maxlevel || maxlevel->eax >= function)
                return NULL;
        if (function & 0x80000000) {
                maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
                if (!maxlevel)
                        return NULL;
        }
        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

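/*
 * Resolve a guest CPUID request from the cached table; for example, a guest
 * executing CPUID with EAX=1 receives the masked leaf-1 feature words set up
 * via KVM_SET_CPUID2.  Out-of-range requests fall back to check_cpuid_limit(),
 * and leaf 0xA is hidden from nested (L2) guests since the PMU is not yet
 * virtualized for them.
 */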
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, function, index);

        if (!best)
                best = check_cpuid_limit(vcpu, function, index);

        /*
         * Perfmon not yet supported for L2 guest.
         */
        if (is_guest_mode(vcpu) && function == 0xa)
                best = NULL;

        if (best) {
                *eax = best->eax;
                *ebx = best->ebx;
                *ecx = best->ecx;
                *edx = best->edx;
        } else
                *eax = *ebx = *ecx = *edx = 0;
        trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

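/*
 * CPUID exit handler: read the guest's RAX/RCX, resolve the leaf through
 * kvm_cpuid(), write the result into RAX/RBX/RCX/RDX and skip the emulated
 * instruction so the guest resumes after its CPUID.
 */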
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 function, eax, ebx, ecx, edx;

        function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
        kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);