/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

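/**
 * ion_heap_map_kernel - map an ion buffer into the kernel address space
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer to map
 *
 * Builds a page array from the buffer's scatterlist and maps it with
 * vmap(), using a cached or write-combined mapping depending on
 * ION_FLAG_CACHED.  Returns the kernel virtual address on success,
 * NULL if the temporary page array cannot be allocated, or
 * ERR_PTR(-ENOMEM) if vmap() fails.
 */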
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (vaddr == NULL)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

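/**
 * ion_heap_unmap_kernel - tear down a mapping made by ion_heap_map_kernel()
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer whose kernel mapping should be removed
 */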
void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

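/**
 * ion_heap_map_user - map an ion buffer into a userspace vma
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer to map
 * @vma:	the vma describing the userspace mapping
 *
 * Walks the buffer's scatterlist and remaps each chunk into the vma with
 * remap_pfn_range(), honouring the mapping offset in vma->vm_pgoff.
 * Returns 0 on success or the error from remap_pfn_range().
 */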
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

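/*
 * Temporarily map a batch of pages with vm_map_ram() and zero them.
 * Returns 0 on success or -ENOMEM if the temporary mapping fails.
 */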
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

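/*
 * Zero every page referenced by a scatterlist, batching the pages in
 * groups of 32 so only a small temporary mapping is needed at a time.
 */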
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

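/**
 * ion_heap_buffer_zero - zero the memory backing an ion buffer
 * @buffer:	the buffer to zero
 *
 * Uses a cached or write-combined mapping to match the buffer's
 * ION_FLAG_CACHED setting.
 */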
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

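/**
 * ion_heap_pages_zero - zero a physically contiguous run of pages
 * @page:	first page of the run
 * @size:	number of bytes to zero
 * @pgprot:	protection with which to map the pages while zeroing
 */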
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}

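/**
 * ion_heap_freelist_add - queue a buffer for deferred freeing
 * @heap:	the heap the buffer belongs to
 * @buffer:	the buffer to free later
 *
 * Adds the buffer to the heap's free list and wakes the deferred-free
 * thread so the buffer can be destroyed in the background.
 */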
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

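/**
 * ion_heap_freelist_size - total size in bytes of the heap's free list
 * @heap:	the heap to query
 */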
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

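/*
 * Drain up to @size bytes of buffers from the heap's free list and
 * destroy them.  A @size of 0 drains the entire list.  When @skip_pools
 * is true the buffers are flagged with ION_PRIV_FLAG_SHRINKER_FREE so
 * their pages bypass any page pools and go straight back to the system.
 * The free_lock is dropped around each ion_buffer_destroy() call.
 * Returns the number of bytes drained.
 */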
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

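/**
 * ion_heap_freelist_drain - drain buffers from the deferred free list
 * @heap:	the heap to drain
 * @size:	maximum number of bytes to drain, 0 for all
 */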
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

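/**
 * ion_heap_freelist_shrink - drain the free list on behalf of the shrinker
 * @heap:	the heap to drain
 * @size:	maximum number of bytes to drain, 0 for all
 *
 * Unlike ion_heap_freelist_drain(), the freed buffers skip any page pools
 * so the memory is actually returned to the system.
 */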
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

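/*
 * Kernel thread that sleeps until buffers appear on the heap's free list
 * and then destroys them one at a time.
 */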
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

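/**
 * ion_heap_init_deferred_free - set up deferred freeing for a heap
 * @heap:	the heap to initialize
 *
 * Creates the free list, waitqueue and low-priority (SCHED_IDLE) kernel
 * thread used to destroy buffers in the background.
 */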
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

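/*
 * Shrinker callback: report how many pages could be reclaimed from the
 * heap's free list plus whatever the heap-specific shrink op can release.
 */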
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                                struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);
        return total;
}

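/*
 * Shrinker callback: reclaim up to sc->nr_to_scan pages, draining the
 * deferred free list first and then asking the heap-specific shrink op
 * for the remainder.
 */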
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                                struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return freed;
}

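/**
 * ion_heap_init_shrinker - register a heap with the memory shrinker
 * @heap:	the heap to register
 */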
void ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;
        register_shrinker(&heap->shrinker);
}

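/**
 * ion_heap_create - create a heap from platform data
 * @heap_data:	platform description of the heap
 *
 * Dispatches to the type-specific constructor and fills in the generic
 * name and id fields.  Returns the new heap or ERR_PTR(-EINVAL) on error.
 */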
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}

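/**
 * ion_heap_destroy - free a heap created by ion_heap_create()
 * @heap:	the heap to destroy, may be NULL
 */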
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}