/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

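/*
 * Mask used to colour-align shared mappings.  The default of
 * PAGE_SIZE - 1 imposes no constraint beyond normal page alignment;
 * platforms whose D-caches can alias are expected to raise this at
 * boot so that all virtual addresses mapping the same page share a
 * cache colour.
 */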
unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

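/*
 * The legacy (bottom-up) layout is used when the task asked for it
 * via the ADDR_COMPAT_LAYOUT personality, when the stack may grow
 * without bound (an unlimited RLIMIT_STACK leaves no safe ceiling
 * for a top-down mmap base), or when it was requested globally via
 * the legacy_va_layout sysctl.
 */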
static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

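/*
 * Place the mmap base below the stack, leaving a gap the size of the
 * stack rlimit clamped to [MIN_GAP, MAX_GAP].  For example, with a
 * 2 GiB TASK_SIZE and the common 8 MiB stack limit the gap clamps up
 * to MIN_GAP (128 MiB), so the base lands at roughly TASK_SIZE minus
 * 128 MiB minus the ASLR offset (figures for illustration only).
 */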
static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

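/*
 * Round addr up to the next colour boundary, then add the colour of
 * the file offset so that the virtual address and the offset agree
 * modulo the cache colour.  Worked example, assuming shm_align_mask
 * were 0xffff and PAGE_SHIFT 12: COLOUR_ALIGN(0x12345, 3) rounds
 * 0x12345 up to 0x20000 and adds (3 << 12) & 0xffff = 0x3000,
 * giving 0x23000.
 */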
#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

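/*
 * Common worker for both allocation directions.  It validates the
 * request, honours MAP_FIXED and address hints (subject to colouring),
 * and otherwise defers to vm_unmapped_area() to search between
 * PAGE_SIZE and mmap_base (top-down) or between mmap_base and
 * TASK_SIZE (bottom-up).
 */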
static unsigned long arch_get_unmapped_area_common(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;
        struct vm_unmapped_area_info info;

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

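        /*
         * File-backed and shared anonymous mappings can be visible at
         * several virtual addresses at once, so they are colour-aligned
         * to keep the aliases in the same cache colour.
         */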
        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

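        /*
         * PAGE_MASK & shm_align_mask keeps only the colour bits above
         * page granularity, so vm_unmapped_area() is constrained by
         * cache colour but not by the sub-page bits of the mask.
         */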
        info.length = len;
        info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;

        if (dir == DOWN) {
                info.flags = VM_UNMAPPED_AREA_TOPDOWN;
                info.low_limit = PAGE_SIZE;
                info.high_limit = mm->mmap_base;
                addr = vm_unmapped_area(&info);

                if (!(addr & ~PAGE_MASK))
                        return addr;

                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here. This scenario
                 * can happen with large stack limits and large mmap()
                 * allocations.
                 */
        }

        info.flags = 0;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function
 * as extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, DOWN);
}

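/*
 * Page-aligned random offset used for mmap ASLR: up to 16 MiB for
 * 32-bit tasks and up to 256 MiB for 64-bit ones, as selected by the
 * masks below.
 */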
unsigned long arch_mmap_rnd(void)
{
        unsigned long rnd;

        rnd = (unsigned long)get_random_int();
        rnd <<= PAGE_SHIFT;
        if (TASK_IS_32BIT_ADDR)
                rnd &= 0xfffffful;
        else
                rnd &= 0xffffffful;

        return rnd;
}

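/*
 * Select the address-space layout for a new mm: legacy tasks get a
 * bottom-up search starting at TASK_UNMAPPED_BASE, everyone else a
 * top-down search starting just below the stack gap.
 */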
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

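/*
 * Randomize the heap start.  If adding the random offset wraps past
 * the end of the address space, fall back to the unrandomized brk.
 */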
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

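/*
 * A kernel virtual address is considered valid if the physical page
 * frame it translates to actually exists.
 */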
int __virt_addr_valid(const volatile void *kaddr)
{
        return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);