#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);
#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable_nort();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable_nort();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
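/*
 * Usage sketch (added commentary, not part of the original header; buf
 * and len stand for caller-provided values). kmap() may sleep, so it is
 * only for contexts that can block:
 *
 *	void *va = kmap(page);
 *	memcpy(buf, va, len);
 *	kunmap(page);
 *
 * kmap_atomic() works in atomic context; no sleeping is allowed between
 * map and unmap, and the address returned by kmap_atomic() is what must
 * be handed back:
 *
 *	void *va = kmap_atomic(page);
 *	memcpy(buf, va, len);
 *	kunmap_atomic(va);
 */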
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

#ifndef CONFIG_PREEMPT_RT_FULL
DECLARE_PER_CPU(int, __kmap_atomic_idx);
#endif
static inline int kmap_atomic_idx_push(void)
{
#ifndef CONFIG_PREEMPT_RT_FULL
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

# ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
# endif
	return idx;
#else
	current->kmap_idx++;
	BUG_ON(current->kmap_idx > KM_TYPE_NR);
	return current->kmap_idx - 1;
#endif
}
static inline int kmap_atomic_idx(void)
{
#ifndef CONFIG_PREEMPT_RT_FULL
	return __this_cpu_read(__kmap_atomic_idx) - 1;
#else
	return current->kmap_idx - 1;
#endif
}
static inline void kmap_atomic_idx_pop(void)
{
#ifndef CONFIG_PREEMPT_RT_FULL
# ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
# else
	__this_cpu_dec(__kmap_atomic_idx);
# endif
#else
	current->kmap_idx--;
# ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(current->kmap_idx < 0);
# endif
#endif
}

#endif
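/*
 * Added commentary (not in the original source): these helpers manage
 * kmap_atomic slots as a small LIFO stack, per CPU in the ordinary case
 * and per task under PREEMPT_RT_FULL, where kmap_atomic sections remain
 * preemptible and the mapping state must follow the task. A highmem
 * architecture's kmap_atomic() implementation uses them roughly like:
 *
 *	int idx = kmap_atomic_idx_push();
 *	... install the page into the fixmap slot selected by idx ...
 *
 * and the matching kunmap path tears the slot down and then calls
 * kmap_atomic_idx_pop().
 */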
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
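/*
 * Example (added commentary, not in the original source): the
 * BUILD_BUG_ON() above turns the classic mistake into a build failure:
 *
 *	void *va = kmap_atomic(page);
 *	...
 *	kunmap_atomic(page);	<- fails to build: argument is the struct page
 *	kunmap_atomic(va);	<- correct: pass back the mapped address
 */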
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
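/*
 * Usage sketch (added commentary, not from the original source): the
 * anonymous page-fault path allocates a zeroed, movable page for the
 * faulting address roughly like:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */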
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
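/*
 * Usage sketch (added commentary, not from the original source):
 * filesystems typically use zero_user() to clear the part of a page
 * beyond EOF, e.g. when the file ends at offset `from` within the page:
 *
 *	zero_user(page, from, PAGE_SIZE - from);
 */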
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}
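/*
 * Note (added commentary, not in the original source): nested
 * kmap_atomic() mappings must be released in reverse (LIFO) order, as
 * the copy helpers above do: `to` is mapped last and unmapped first,
 * because the slots are handed out via kmap_atomic_idx_push()/_pop().
 */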
#endif /* _LINUX_HIGHMEM_H */