#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H
/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)
#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the registers the platform has.  For i386,
   that's just the four above. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif /* CONFIG_X86_32 */
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
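
/*
 * For concreteness: on i386 this works out to
 * (EAX|EDX|ECX) & ~(EAX|EDX) == CLBR_ECX; on x86_64, rax is neither an
 * argument nor a scratch register, so CLBR_CALLEE_SAVE covers rdi, rsi,
 * rdx, rcx and r8-r11.
 */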

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in paravirt.h.
 */
struct paravirt_callee_save {
	void *func;
};
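
/*
 * For example, paravirt.h builds values of this type with helpers
 * along the lines of
 *
 *	#define __PV_IS_CALLEE_SAVE(func)			\
 *		((struct paravirt_callee_save) { func })
 *
 * so a backend can write, e.g., .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl).
 */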

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	int paravirt_enabled;
	unsigned int features;	  /* valid only if paravirt_enabled is set */
	const char *name;
};

#define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x)
/* Supported features */
#define PV_SUPPORTED_RTC        (1<<0)
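
/*
 * Typical (sketched) use: platform code can check for hardware the
 * hypervisor may not emulate, e.g.
 *
 *	if (!paravirt_has(RTC))
 *		return -ENODEV;
 */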

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
};
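
/*
 * A minimal patch hook might simply defer every site to the generic
 * patcher declared later in this header (sketch; my_patch is a
 * hypothetical backend function):
 *
 *	static unsigned my_patch(u8 type, u16 clobber, void *insnbuf,
 *				 unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobber, insnbuf,
 *					      addr, len);
 *	}
 */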

struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
};

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
};

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	/* store_gdt has been removed. */
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

#ifdef CONFIG_X86_32
	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only used in 32-bit kernels.  64-bit kernels use
	 * usergs_sysret32 instead.
	 */
	void (*irq_enable_sysexit)(void);
#endif

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};

struct pv_irq_ops {
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);

	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);
	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp);
	void (*pmd_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pmd_t *pmdp);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if CONFIG_PGTABLE_LEVELS == 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

	struct pv_lazy_ops lazy_mode;

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
};

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#else
typedef u16 __ticket_t;
#endif

struct qspinlock;

struct pv_lock_ops {
#ifdef CONFIG_QUEUED_SPINLOCKS
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);
#else /* !CONFIG_QUEUED_SPINLOCKS */
	struct paravirt_callee_save lock_spinning;
	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
#endif /* !CONFIG_QUEUED_SPINLOCKS */
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
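
/*
 * For example, PARAVIRT_PATCH(pv_irq_ops.irq_disable) is the
 * pointer-sized index of that slot within paravirt_patch_template,
 * so the type number recorded at a patch site identifies exactly one
 * ops entry.
 */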

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	"  .short " clobber "\n"			\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

#define DEF_NATIVE(ops, name, code)					\
	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
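
/*
 * Example (the native patch code uses this pattern for tiny ops):
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * which exports start_pv_irq_ops_irq_disable[] and
 * end_pv_irq_ops_irq_disable[] for paravirt_patch_insns() to copy
 * over a call site.
 */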

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes them in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS				\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS					\
	unsigned long __edi = __edi, __esi = __esi,	\
		__edx = __edx, __ecx = __ecx, __eax = __eax
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)

#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)

#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")
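
/*
 * Illustrative wrappers (paravirt.h defines inline functions along
 * these lines):
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 *
 * and, for a callee-save op, PVOP_VCALLEE0(pv_irq_ops.irq_disable).
 */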

#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* The 4-argument case is the only place i386 and x86_64 differ:
   i386 has to pass the fourth argument on the stack. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif	/* CONFIG_X86_32 */
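
/*
 * Example 4-argument call, modeled on the pv_mmu_ops wrappers in
 * paravirt.h (sketch):
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
 *	}
 */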

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)
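
/*
 * These serve as cheap defaults for the native ops tables: paravirt_nop
 * for hooks with nothing to do, and the identity functions for
 * value-transforming ops such as pte_val/make_pte, so the patcher can
 * later collapse such call sites into nops or a bare register move
 * (see paravirt_patch_ident_32/64 above).
 */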

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
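
/*
 * Sketch of how a patcher consumes these records (apply_paravirt() in
 * arch/x86/kernel/alternative.c does roughly this, working through a
 * local instruction buffer):
 *
 *	struct paravirt_patch_site *p;
 *	unsigned used;
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 p->instr, (unsigned long)p->instr,
 *					 p->len);
 *	}
 *
 * The remaining p->len - used bytes of each site are then nop-padded,
 * as the patch hook's contract above requires.
 */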

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */