Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / drivers / staging / android / ion / ion_page_pool.c
/*
 * drivers/staging/android/ion/ion_page_pool.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
16
17 #include <linux/debugfs.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/list.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/swap.h>
25 #include "ion_priv.h"
26
27 static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
28 {
29         struct page *page = alloc_pages(pool->gfp_mask, pool->order);
30
31         if (!page)
32                 return NULL;
33         ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
34                                                 DMA_BIDIRECTIONAL);
35         return page;
36 }
37
/* Give a 2^order page block back to the buddy allocator, bypassing the pool. */
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
				     struct page *page)
{
	__free_pages(page, pool->order);
}
43
44 static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
45 {
46         mutex_lock(&pool->mutex);
47         if (PageHighMem(page)) {
48                 list_add_tail(&page->lru, &pool->high_items);
49                 pool->high_count++;
50         } else {
51                 list_add_tail(&page->lru, &pool->low_items);
52                 pool->low_count++;
53         }
54         mutex_unlock(&pool->mutex);
55         return 0;
56 }
57
58 static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
59 {
60         struct page *page;
61
62         if (high) {
63                 BUG_ON(!pool->high_count);
64                 page = list_first_entry(&pool->high_items, struct page, lru);
65                 pool->high_count--;
66         } else {
67                 BUG_ON(!pool->low_count);
68                 page = list_first_entry(&pool->low_items, struct page, lru);
69                 pool->low_count--;
70         }
71
72         list_del(&page->lru);
73         return page;
74 }
75
76 struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
77 {
78         struct page *page = NULL;
79
80         BUG_ON(!pool);
81
82         mutex_lock(&pool->mutex);
83         if (pool->high_count)
84                 page = ion_page_pool_remove(pool, true);
85         else if (pool->low_count)
86                 page = ion_page_pool_remove(pool, false);
87         mutex_unlock(&pool->mutex);
88
89         if (!page)
90                 page = ion_page_pool_alloc_pages(pool);
91
92         return page;
93 }
94
95 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
96 {
97         int ret;
98
99         BUG_ON(pool->order != compound_order(page));
100
101         ret = ion_page_pool_add(pool, page);
102         if (ret)
103                 ion_page_pool_free_pages(pool, page);
104 }
105
106 static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
107 {
108         int count = pool->low_count;
109
110         if (high)
111                 count += pool->high_count;
112
113         return count << pool->order;
114 }
115
116 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
117                                 int nr_to_scan)
118 {
119         int freed;
120         bool high;
121
122         if (current_is_kswapd())
123                 high = true;
124         else
125                 high = !!(gfp_mask & __GFP_HIGHMEM);
126
127         if (nr_to_scan == 0)
128                 return ion_page_pool_total(pool, high);
129
130         for (freed = 0; freed < nr_to_scan; freed++) {
131                 struct page *page;
132
133                 mutex_lock(&pool->mutex);
134                 if (pool->low_count) {
135                         page = ion_page_pool_remove(pool, false);
136                 } else if (high && pool->high_count) {
137                         page = ion_page_pool_remove(pool, true);
138                 } else {
139                         mutex_unlock(&pool->mutex);
140                         break;
141                 }
142                 mutex_unlock(&pool->mutex);
143                 ion_page_pool_free_pages(pool, page);
144         }
145
146         return freed;
147 }
148
149 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
150 {
151         struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
152                                              GFP_KERNEL);
153         if (!pool)
154                 return NULL;
155         pool->high_count = 0;
156         pool->low_count = 0;
157         INIT_LIST_HEAD(&pool->low_items);
158         INIT_LIST_HEAD(&pool->high_items);
159         pool->gfp_mask = gfp_mask | __GFP_COMP;
160         pool->order = order;
161         mutex_init(&pool->mutex);
162         plist_node_init(&pool->list, order);
163
164         return pool;
165 }
166
/*
 * Free the pool bookkeeping structure itself.
 * NOTE(review): assumes the pool has already been drained (e.g. via
 * ion_page_pool_shrink); any blocks still on the lists would leak —
 * confirm against callers.
 */
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
	kfree(pool);
}
171
/* No module-level setup is needed; pools are created on demand by heaps. */
static int __init ion_page_pool_init(void)
{
	return 0;
}

/* Nothing to tear down at module exit. */
static void __exit ion_page_pool_exit(void)
{
}

module_init(ion_page_pool_init);
module_exit(ion_page_pool_exit);