kvmfornfv.git: kernel/tools/testing/selftests/vm/compaction_test.c
/*
 *
 * A test for the patch "Allow compaction of unevictable pages".
 * With this patch we should be able to allocate at least 1/4
 * of RAM in huge pages. Without the patch much less is
 * allocated.
 */
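
/*
 * Rough flow of the test: lock down most of the free memory in 1 MB
 * chunks, release every other chunk so that free memory stays
 * interleaved with unevictable (mlocked) pages, then raise
 * /proc/sys/vm/nr_hugepages and check how much memory the kernel was
 * able to assemble into huge pages.
 */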

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>

#define MAP_SIZE 1048576

struct map_list {
        void *map;
        struct map_list *next;
};

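/*
 * Read MemFree and Hugepagesize (both reported in kB) from
 * /proc/meminfo via a shell pipeline.
 */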
int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
{
        char  buffer[256] = {0};
        char *cmd = "cat /proc/meminfo | grep -i memfree | grep -o '[0-9]*'";
        FILE *cmdfile = popen(cmd, "r");

        if (!cmdfile) {
                perror("Failed to run meminfo command");
                return -1;
        }

        if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
                perror("Failed to read meminfo\n");
                pclose(cmdfile);
                return -1;
        }

        pclose(cmdfile);

        *memfree = atoll(buffer);
        cmd = "cat /proc/meminfo | grep -i hugepagesize | grep -o '[0-9]*'";
        cmdfile = popen(cmd, "r");

        if (!cmdfile) {
                perror("Failed to run meminfo command");
                return -1;
        }

        if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
                perror("Failed to read meminfo\n");
                pclose(cmdfile);
                return -1;
        }

        pclose(cmdfile);
        *hugepagesize = atoll(buffer);

        return 0;
}

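/*
 * The test only makes sense when the kernel is allowed to compact
 * unevictable (mlocked) pages, i.e. when the sysctl
 * compact_unevictable_allowed is set to 1. Returns 0 in that case,
 * -1 otherwise.
 */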
int prereq(void)
{
        char allowed;
        int fd;

        fd = open("/proc/sys/vm/compact_unevictable_allowed",
                  O_RDONLY | O_NONBLOCK);
        if (fd < 0) {
                perror("Failed to open\n"
                       "/proc/sys/vm/compact_unevictable_allowed\n");
                return -1;
        }

        if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
                perror("Failed to read from\n"
                       "/proc/sys/vm/compact_unevictable_allowed\n");
                close(fd);
                return -1;
        }

        close(fd);
        if (allowed == '1')
                return 0;

        return -1;
}

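/*
 * Request far more huge pages than can possibly be satisfied through
 * /proc/sys/vm/nr_hugepages, then check how many the kernel actually
 * managed to allocate. The test passes if at least about a third of
 * the free memory could be turned into huge pages; the original
 * nr_hugepages value is restored before returning.
 */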
int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
{
        int fd;
        int compaction_index = 0;
        char initial_nr_hugepages[10] = {0};
        char nr_hugepages[10] = {0};

        /* We want to test with 80% of available memory. Else, OOM killer comes
           in to play */
        mem_free = mem_free * 0.8;

        fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
        if (fd < 0) {
                perror("Failed to open /proc/sys/vm/nr_hugepages");
                return -1;
        }

        if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
                perror("Failed to read from /proc/sys/vm/nr_hugepages");
                goto close_fd;
        }

        /* Start with the initial condition of 0 huge pages */
        if (write(fd, "0", sizeof(char)) != sizeof(char)) {
                perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
                goto close_fd;
        }

        lseek(fd, 0, SEEK_SET);

        /* Request a large number of huge pages. The kernel will allocate
           as many as it can */
        if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
                perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
                goto close_fd;
        }

        lseek(fd, 0, SEEK_SET);

        if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
                perror("Failed to read from /proc/sys/vm/nr_hugepages\n");
                goto close_fd;
        }

        /* We should have been able to request at least 1/3rd of the memory in
           huge pages */
        if (atoi(nr_hugepages) == 0) {
                fprintf(stderr, "ERROR: No huge pages could be allocated\n");
                goto close_fd;
        }

        compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);

        if (compaction_index > 3) {
                printf("No of huge pages allocated = %d\n",
                       (atoi(nr_hugepages)));
                fprintf(stderr, "ERROR: Less than 1/%d of memory is available\n"
                        "as huge pages\n", compaction_index);
                goto close_fd;
        }

        printf("No of huge pages allocated = %d\n",
               (atoi(nr_hugepages)));

        lseek(fd, 0, SEEK_SET);

        if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
            != strlen(initial_nr_hugepages)) {
                perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
                goto close_fd;
        }

        close(fd);
        return 0;

 close_fd:
        close(fd);
        printf("Not OK. Compaction test failed.\n");
        return -1;
}


int main(int argc, char **argv)
{
        struct rlimit lim;
        struct map_list *list, *entry;
        size_t page_size, i;
        void *map = NULL;
        unsigned long mem_free = 0;
        unsigned long hugepage_size = 0;
        unsigned long mem_fragmentable = 0;

        if (prereq() != 0) {
                printf("Either the sysctl compact_unevictable_allowed is not\n"
                       "set to 1 or couldn't read the proc file.\n"
                       "Skipping the test\n");
                return 0;
        }

        lim.rlim_cur = RLIM_INFINITY;
        lim.rlim_max = RLIM_INFINITY;
        if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
                perror("Failed to set rlimit:\n");
                return -1;
        }

        page_size = getpagesize();

        list = NULL;

        if (read_memory_info(&mem_free, &hugepage_size) != 0) {
                printf("ERROR: Cannot read meminfo\n");
                return -1;
        }

        mem_fragmentable = mem_free * 0.8 / 1024;

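        /*
         * Fragment memory: mem_fragmentable counts 1 MB units (mem_free
         * is in kB), and each iteration pins one MAP_SIZE-sized anonymous
         * mapping with MAP_LOCKED, making its pages unevictable.
         */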
        while (mem_fragmentable > 0) {
                map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
                           MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
                if (map == MAP_FAILED)
                        break;

                entry = malloc(sizeof(struct map_list));
                if (!entry) {
                        munmap(map, MAP_SIZE);
                        break;
                }
                entry->map = map;
                entry->next = list;
                list = entry;

                /* Write something (in this case the address of the map) to
                 * ensure that KSM can't merge the mapped pages
                 */
                for (i = 0; i < MAP_SIZE; i += page_size)
                        *(unsigned long *)(map + i) = (unsigned long)map + i;

                mem_fragmentable--;
        }

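        /*
         * Unmap every other mapping (the loop advances twice per
         * iteration), so roughly half of the locked memory remains in
         * place and free memory stays interleaved with unevictable pages
         * when check_compaction() triggers compaction below.
         */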
        for (entry = list; entry != NULL; entry = entry->next) {
                munmap(entry->map, MAP_SIZE);
                if (!entry->next)
                        break;
                entry = entry->next;
        }

        if (check_compaction(mem_free, hugepage_size) == 0)
                return 0;

        return -1;
}