[kvmfornfv.git] / qemu / target-i386 / svm_helper.c
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

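/* There is no system emulation state under CONFIG_USER_ONLY, so the SVM
   helpers are empty stubs. */
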
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
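    /* Pack the descriptor attributes into the 12-bit VMCB "attrib" format:
       bits 7:0 come from descriptor bits 15:8 (type/S/DPL/P), bits 11:8
       from descriptor bits 23:20 (AVL/L/D-B/G). */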
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
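    /* Expand the packed VMCB attributes back into the segment cache flags
       layout (the descriptor's high dword). */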
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

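    /* aflag == 2 means a 64-bit address size; otherwise the VMCB address
       taken from RAX is truncated to 32 bits. */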
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

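    /* load the guest state from the VMCB */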
    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
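    /* If the guest requested virtual interrupt masking, track its virtual
       TPR and remember the host's IF in HF2_HIF. */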
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

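            /* The MSR permission map holds two bits per MSR (read, then
               write) in three 2-Kbyte regions covering the 0000_0000h,
               C000_0000h and C001_0000h MSR ranges; compute the byte
               offset (t1) and bit offset (t0) of this MSR's pair. */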
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

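        /* The I/O permission map has one intercept bit per port; test every
           bit covered by this access (the size in bytes is taken from
           bits 6:4 of param). */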
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

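    /* Reflect the guest's virtual TPR and any still-pending virtual
       interrupt back into the VMCB's V_TPR/V_IRQ fields. */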
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif