/* kvmfornfv.git: kernel/include/linux/huge_mm.h (base: rt linux 4.1.3-rt3) */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

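/*
 * Huge (PMD-mapped) page operations implemented in mm/huge_memory.c:
 * anonymous fault handling, fork-time copying, access-bit updates, COW,
 * GUP lookup, unmapping, mincore(), mremap() and mprotect() support.
 */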
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);

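/*
 * Bits in transparent_hugepage_flags, toggled through the
 * /sys/kernel/mm/transparent_hugepage/ sysfs interface.
 */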
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

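/*
 * page_check_address_pmd() returns the pmd mapping @page in @mm at
 * @address, with *@ptl taken, or NULL; the flag selects whether a pmd
 * that is currently being split is acceptable.
 */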
enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag,
				     spinlock_t **ptl);

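/*
 * Geometry of a PMD-mapped huge page: its buddy order and the number of
 * base pages it covers (HPAGE_PMD_SHIFT/SIZE/MASK are only defined under
 * CONFIG_TRANSPARENT_HUGEPAGE below).
 */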
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

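/*
 * transparent_hugepage_enabled() decides whether THP may be used for a
 * vma: either THP is enabled globally, or it is enabled for madvise()d
 * regions and the vma is marked VM_HUGEPAGE; VM_NOHUGEPAGE and temporary
 * stack vmas are always excluded.
 */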
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

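/*
 * Splitting helpers: split_huge_page() splits a transparent huge page
 * back into base pages; split_huge_page_pmd() splits only the huge pmd
 * mapping at @__address if one is present.
 */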
extern unsigned long transparent_hugepage_flags;
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
extern void __split_huge_page_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd);
#define split_huge_page_pmd(__vma, __address, __pmd)			\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (unlikely(pmd_trans_huge(*____pmd)))			\
			__split_huge_page_pmd(__vma, __address,		\
					____pmd);			\
	} while (0)
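/*
 * wait_split_huge_page() waits for a concurrent split of a huge page to
 * finish: the splitter holds the anon_vma write lock, so taking and
 * immediately releasing it serializes against the split.
 */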
#define wait_split_huge_page(__anon_vma, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		anon_vma_lock_write(__anon_vma);			\
		anon_vma_unlock_write(__anon_vma);			\
		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
		       pmd_trans_huge(*____pmd));			\
	} while (0)
extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd);
#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
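/*
 * pmd_trans_huge_lock() takes the pmd spinlock and returns 1 with *ptl
 * held when the pmd maps a stable transparent huge page; in all other
 * cases the lock is dropped before returning.
 */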
extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
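/* Number of base pages covered by @page: HPAGE_PMD_NR for a THP, else 1. */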
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

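/* NUMA hinting fault on a huge pmd; implemented in mm/huge_memory.c. */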
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

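/*
 * The shared huge zero page: read faults on anonymous THP regions can
 * map it instead of allocating a huge page when use_zero_page is set.
 */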
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

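/*
 * !CONFIG_TRANSPARENT_HUGEPAGE: stub versions so callers compile and
 * optimize away when THP support is not built in.
 */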
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__vma, __address, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	return 0;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */