These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
kvmfornfv.git: kernel/arch/x86/kernel/paravirt_patch_64.c

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

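/*
 * DEF_NATIVE(ops, name, code) emits the native instruction sequence
 * "code" into the kernel image, bracketed by start_<ops>_<name> and
 * end_<ops>_<name> symbols so that native_patch() below can copy it
 * over a paravirt call site.  As a rough sketch of the expansion (the
 * real macro lives in asm/paravirt_types.h):
 *
 *	extern const char start_##ops##_##name[], end_##ops##_##name[];
 *	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":");
 */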
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

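/*
 * Native return-to-user paths: swapgs restores the user GS base
 * before sysexit/sysret drops back to user mode.
 */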
DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

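/*
 * Identity sequences: a paravirt op that just returns its argument
 * unchanged can be patched down to a single register move.
 */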
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

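/*
 * The native queued spin unlock is a single byte store of zero to the
 * lock word, so it is worth patching inline when the native unlock is
 * in use (checked at patch time via pv_is_native_spin_unlock()).
 */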
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
#endif

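/*
 * Patch an identity call site with "mov %edi, %eax" / "mov %rdi, %rax";
 * paravirt_patch_insns() copies the native sequence into the site when
 * it fits within the available space.
 */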
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
        return paravirt_patch_insns(insnbuf, len,
                                    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
        return paravirt_patch_insns(insnbuf, len,
                                    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);

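/*
 * native_patch() runs once per paravirt call site during boot: for the
 * ops listed below it copies the corresponding native instructions
 * (emitted by DEF_NATIVE above) directly over the indirect call, and
 * everything else falls back to paravirt_patch_default().
 */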
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
{
        const unsigned char *start, *end;
        unsigned ret;

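        /*
         * Each PATCH_SITE expands to a case label that picks up the
         * start/end markers emitted by the matching DEF_NATIVE entry.
         */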
#define PATCH_SITE(ops, x)                                      \
                case PARAVIRT_PATCH(ops.x):                     \
                        start = start_##ops##_##x;              \
                        end = end_##ops##_##x;                  \
                        goto patch_site
        switch(type) {
                PATCH_SITE(pv_irq_ops, restore_fl);
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_irq_ops, irq_enable);
                PATCH_SITE(pv_irq_ops, irq_disable);
                PATCH_SITE(pv_cpu_ops, usergs_sysret32);
                PATCH_SITE(pv_cpu_ops, usergs_sysret64);
                PATCH_SITE(pv_cpu_ops, swapgs);
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
                PATCH_SITE(pv_cpu_ops, clts);
                PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
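                /*
                 * The spin unlock op is only patched inline when the
                 * native unlock is actually in use; under a pv spinlock
                 * backend the check fails and control falls through to
                 * the default case below.
                 */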
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
                        if (pv_is_native_spin_unlock()) {
                                start = start_pv_lock_ops_queued_spin_unlock;
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
#endif

        default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;

patch_site:
                ret = paravirt_patch_insns(ibuf, len, start, end);
                break;
        }
#undef PATCH_SITE
        return ret;
}