#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/i387.h>
#include <asm/pgtable.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP          EFI_ARCH_1
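
/*
 * Editorial illustration, not part of the original header: EFI_OLD_MEMMAP is
 * a bit in efi.flags, so the rest of the EFI code typically chooses between
 * the two mapping schemes with something like
 *
 *      if (efi_enabled(EFI_OLD_MEMMAP))
 *              ... use the old ioremap-based runtime mapping ...
 *      else
 *              ... use the stable-VA mapping described above ...
 */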

#define EFI32_LOADER_SIGNATURE  "EL32"
#define EFI64_LOADER_SIGNATURE  "EL64"

#ifdef CONFIG_X86_32


extern unsigned long asmlinkage efi_call_phys(void *, ...);

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */

/* Use this macro if your virtual call returns a non-void value */
#define efi_call_virt(f, args...) \
({                                                                      \
        efi_status_t __s;                                               \
        kernel_fpu_begin();                                             \
        __s = ((efi_##f##_t __attribute__((regparm(0)))*)               \
                efi.systab->runtime->f)(args);                          \
        kernel_fpu_end();                                               \
        __s;                                                            \
})
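
/*
 * Illustrative usage only (not part of this header): a runtime wrapper
 * would invoke, e.g.,
 *
 *      status = efi_call_virt(get_time, tm, tc);
 *
 * which expands to a regparm(0) call through efi.systab->runtime->get_time
 * with kernel FPU state saved and restored around it.
 */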

/* Use this macro if your virtual call does not return any value */
#define __efi_call_virt(f, args...) \
({                                                                      \
        kernel_fpu_begin();                                             \
        ((efi_##f##_t __attribute__((regparm(0)))*)                     \
                efi.systab->runtime->f)(args);                          \
        kernel_fpu_end();                                               \
})
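
/*
 * Illustrative usage only (not part of this header): the void-returning
 * variant is meant for services such as reset_system, e.g.
 *
 *      __efi_call_virt(reset_system, EFI_RESET_COLD, EFI_SUCCESS, 0, NULL);
 */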

#define efi_ioremap(addr, size, type, attr)     ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE    "EL64"

extern u64 asmlinkage efi_call(void *fp, ...);
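
/*
 * Editorial note, not part of the original header: efi_call() is implemented
 * in assembly and marshals its arguments from the kernel's SysV calling
 * convention into the Microsoft x64 convention the 64-bit firmware expects
 * before jumping to the given service pointer.
 */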

#define efi_call_phys(f, args...)               efi_call((f), args)

#define efi_call_virt(f, ...)                                           \
({                                                                      \
        efi_status_t __s;                                               \
                                                                        \
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
        __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);    \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
        __s;                                                            \
})
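
/*
 * Editorial note, not part of the original header: before a 64-bit runtime
 * call, the kernel mappings the firmware may reference are synced into the
 * EFI page tables, preemption is disabled, and FPU/SSE state is saved,
 * since the firmware is free to clobber extended register state.
 */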

/*
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
 * virt calls that would be void on X86_32.
 */
#define __efi_call_virt(f, args...) efi_call_virt(f, args)

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);

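/*
 * Editorial note (not in the original header): this block carries the
 * physical addresses of EFI data (firmware vendor string, runtime services
 * table, config tables, SMBIOS entry point) handed over from a kexec'ing
 * kernel via setup_data; efi_setup below holds the physical address of the
 * block when one was passed in.
 */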
struct efi_setup_data {
        u64 fw_vendor;
        u64 runtime;
        u64 tables;
        u64 smbios;
        u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI

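/*
 * Editorial comment (not in the original header): true when the kernel and
 * the firmware have the same word size, i.e. a 64-bit kernel on 64-bit EFI
 * or a 32-bit kernel on 32-bit EFI.
 */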
static inline bool efi_is_native(void)
{
        return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

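/*
 * Editorial comment (not in the original header): runtime services are
 * usable either natively or, with CONFIG_EFI_MIXED, through the 64-bit
 * kernel's thunk to 32-bit firmware; the exception is efi=old_map, which
 * the mixed-mode thunk cannot use.
 */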
static inline bool efi_runtime_supported(void)
{
        if (efi_is_native())
                return true;

        if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
                return true;

        return false;
}

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

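/*
 * Editorial comment (not in the original header): CONFIG_EFI_MIXED covers a
 * 64-bit kernel booted on 32-bit EFI firmware; calls into the firmware go
 * through a thunk that drops to 32-bit mode for the duration of the call.
 */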
#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
        void *phys_set_virtual_address_map,
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
        void *phys_set_virtual_address_map,
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map)
{
        return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */


/* arch specific definitions used by the stub code */

struct efi_config {
        u64 image_handle;
        u64 table;
        u64 allocate_pool;
        u64 allocate_pages;
        u64 get_memory_map;
        u64 free_pool;
        u64 free_pages;
        u64 locate_handle;
        u64 handle_protocol;
        u64 exit_boot_services;
        u64 text_output;
        efi_status_t (*call)(unsigned long, ...);
        bool is64;
} __packed;
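
/*
 * Editorial note (not in the original header): the boot stub's entry and
 * setup code fills this structure with the firmware's boot-service pointers
 * plus a call wrapper, so the shared stub code can run unchanged on 32-bit
 * and 64-bit firmware; it is __packed because it is also laid out from
 * assembly.
 */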

__pure const struct efi_config *__efi_early(void);

#define efi_call_early(f, ...)                                          \
        __efi_early()->call(__efi_early()->f, __VA_ARGS__);
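
/*
 * Illustrative usage only (not part of this header): early stub code
 * allocates from the firmware with, e.g.,
 *
 *      status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 *                              size, (void **)&buf);
 */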

extern bool efi_reboot_required(void);

#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
        return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */