Upgrade to 4.4.50-rt62
[kvmfornfv.git] kernel/drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45  * struct ion_device - the metadata of the ion device node
46  * @dev:                the actual misc device
47  * @buffers:            an rb tree of all the existing buffers
48  * @buffer_lock:        lock protecting the tree of buffers
49  * @lock:               rwsem protecting the tree of heaps and clients
50  * @heaps:              list of all the heaps in the system
51  * @clients:            rb tree of all the clients (kernel and userspace) on this device
52  */
53 struct ion_device {
54         struct miscdevice dev;
55         struct rb_root buffers;
56         struct mutex buffer_lock;
57         struct rw_semaphore lock;
58         struct plist_head heaps;
59         long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
60                              unsigned long arg);
61         struct rb_root clients;
62         struct dentry *debug_root;
63         struct dentry *heaps_debug_root;
64         struct dentry *clients_debug_root;
65 };
66
67 /**
68  * struct ion_client - a process/hw block local address space
69  * @node:               node in the tree of all clients
70  * @dev:                backpointer to ion device
71  * @handles:            an rb tree of all the handles in this client
72  * @idr:                an idr space for allocating handle ids
73  * @lock:               lock protecting the tree of handles
74  * @name:               used for debugging
75  * @display_name:       used for debugging (unique version of @name)
76  * @display_serial:     used for debugging (to make display_name unique)
77  * @task:               used for debugging
78  *
79  * A client represents a list of buffers this client may access.
80  * The mutex stored here is used to protect both the tree of handles
81  * and the handles themselves, and should be held while modifying either.
82  */
83 struct ion_client {
84         struct rb_node node;
85         struct ion_device *dev;
86         struct rb_root handles;
87         struct idr idr;
88         struct mutex lock;
89         const char *name;
90         char *display_name;
91         int display_serial;
92         struct task_struct *task;
93         pid_t pid;
94         struct dentry *debug_root;
95 };
96
97 /**
98  * struct ion_handle - a client-local reference to a buffer
99  * @ref:                reference count
100  * @client:             back pointer to the client the buffer resides in
101  * @buffer:             pointer to the buffer
102  * @node:               node in the client's handle rbtree
103  * @kmap_cnt:           count of times this client has mapped to kernel
104  * @id:                 client-unique id allocated by client->idr
105  *
106  * Modifications to node and kmap_cnt should be protected by the
107  * lock in the client.  Other fields are never changed after initialization.
108  */
109 struct ion_handle {
110         struct kref ref;
111         struct ion_client *client;
112         struct ion_buffer *buffer;
113         struct rb_node node;
114         unsigned int kmap_cnt;
115         int id;
116 };
117
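/*
 * A buffer gets per-page fault handling (and per-page dirty tracking in
 * ion_buffer_sync_for_device()) only when it is cached and the caller did
 * not request explicit syncs via ION_FLAG_CACHED_NEEDS_SYNC.  Uncached
 * buffers and buffers with explicit syncs are mapped up front in ion_mmap().
 */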
118 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
119 {
120         return (buffer->flags & ION_FLAG_CACHED) &&
121                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
122 }
123
124 bool ion_buffer_cached(struct ion_buffer *buffer)
125 {
126         return !!(buffer->flags & ION_FLAG_CACHED);
127 }
128
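/*
 * For buffers with faulted user mappings, entries in buffer->pages[] double
 * as a dirty flag: bit 0 of the page pointer is set by ion_vm_fault() when a
 * page is handed to userspace and cleared again once
 * ion_buffer_sync_for_device() has synced that page.  The helpers below pack
 * and unpack that bit.
 */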
129 static inline struct page *ion_buffer_page(struct page *page)
130 {
131         return (struct page *)((unsigned long)page & ~(1UL));
132 }
133
134 static inline bool ion_buffer_page_is_dirty(struct page *page)
135 {
136         return !!((unsigned long)page & 1UL);
137 }
138
139 static inline void ion_buffer_page_dirty(struct page **page)
140 {
141         *page = (struct page *)((unsigned long)(*page) | 1UL);
142 }
143
144 static inline void ion_buffer_page_clean(struct page **page)
145 {
146         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
147 }
148
149 /* this function should only be called while dev->lock is held */
150 static void ion_buffer_add(struct ion_device *dev,
151                            struct ion_buffer *buffer)
152 {
153         struct rb_node **p = &dev->buffers.rb_node;
154         struct rb_node *parent = NULL;
155         struct ion_buffer *entry;
156
157         while (*p) {
158                 parent = *p;
159                 entry = rb_entry(parent, struct ion_buffer, node);
160
161                 if (buffer < entry) {
162                         p = &(*p)->rb_left;
163                 } else if (buffer > entry) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         pr_err("%s: buffer already found.\n", __func__);
167                         BUG();
168                 }
169         }
170
171         rb_link_node(&buffer->node, parent, p);
172         rb_insert_color(&buffer->node, &dev->buffers);
173 }
174
175 /* this function should only be called while dev->lock is held */
176 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
177                                      struct ion_device *dev,
178                                      unsigned long len,
179                                      unsigned long align,
180                                      unsigned long flags)
181 {
182         struct ion_buffer *buffer;
183         struct sg_table *table;
184         struct scatterlist *sg;
185         int i, ret;
186
187         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
188         if (!buffer)
189                 return ERR_PTR(-ENOMEM);
190
191         buffer->heap = heap;
192         buffer->flags = flags;
193         kref_init(&buffer->ref);
194
195         ret = heap->ops->allocate(heap, buffer, len, align, flags);
196
197         if (ret) {
198                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
199                         goto err2;
200
201                 ion_heap_freelist_drain(heap, 0);
202                 ret = heap->ops->allocate(heap, buffer, len, align,
203                                           flags);
204                 if (ret)
205                         goto err2;
206         }
207
208         buffer->dev = dev;
209         buffer->size = len;
210
211         table = heap->ops->map_dma(heap, buffer);
212         if (WARN_ONCE(table == NULL,
213                         "heap->ops->map_dma should return ERR_PTR on error"))
214                 table = ERR_PTR(-EINVAL);
215         if (IS_ERR(table)) {
216                 ret = -EINVAL;
217                 goto err1;
218         }
219
220         buffer->sg_table = table;
221         if (ion_buffer_fault_user_mappings(buffer)) {
222                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
223                 struct scatterlist *sg;
224                 int i, j, k = 0;
225
226                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
227                 if (!buffer->pages) {
228                         ret = -ENOMEM;
229                         goto err;
230                 }
231
232                 for_each_sg(table->sgl, sg, table->nents, i) {
233                         struct page *page = sg_page(sg);
234
235                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
236                                 buffer->pages[k++] = page++;
237                 }
238         }
239
240         buffer->dev = dev;
241         buffer->size = len;
242         INIT_LIST_HEAD(&buffer->vmas);
243         mutex_init(&buffer->lock);
244         /*
245          * This will set up dma addresses for the sglist -- it is not
246          * technically correct as per the DMA API -- a specific
247          * device isn't really taking ownership here.  However, in practice on
248          * our systems the only dma_address space is physical addresses.
249          * Additionally, we can't afford the overhead of invalidating every
250          * allocation via dma_map_sg().  The implicit contract here is that
251          * memory coming from the heaps is ready for dma, i.e. if it has a
252          * cached mapping, that mapping has been invalidated.
253          */
254         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
255                 sg_dma_address(sg) = sg_phys(sg);
256                 sg_dma_len(sg) = sg->length;
257         }
258         mutex_lock(&dev->buffer_lock);
259         ion_buffer_add(dev, buffer);
260         mutex_unlock(&dev->buffer_lock);
261         return buffer;
262
263 err:
264         heap->ops->unmap_dma(heap, buffer);
265 err1:
266         heap->ops->free(buffer);
267 err2:
268         kfree(buffer);
269         return ERR_PTR(ret);
270 }
271
272 void ion_buffer_destroy(struct ion_buffer *buffer)
273 {
274         if (WARN_ON(buffer->kmap_cnt > 0))
275                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
276         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
277         buffer->heap->ops->free(buffer);
278         vfree(buffer->pages);
279         kfree(buffer);
280 }
281
282 static void _ion_buffer_destroy(struct kref *kref)
283 {
284         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
285         struct ion_heap *heap = buffer->heap;
286         struct ion_device *dev = buffer->dev;
287
288         mutex_lock(&dev->buffer_lock);
289         rb_erase(&buffer->node, &dev->buffers);
290         mutex_unlock(&dev->buffer_lock);
291
292         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
293                 ion_heap_freelist_add(heap, buffer);
294         else
295                 ion_buffer_destroy(buffer);
296 }
297
298 static void ion_buffer_get(struct ion_buffer *buffer)
299 {
300         kref_get(&buffer->ref);
301 }
302
303 static int ion_buffer_put(struct ion_buffer *buffer)
304 {
305         return kref_put(&buffer->ref, _ion_buffer_destroy);
306 }
307
308 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
309 {
310         mutex_lock(&buffer->lock);
311         buffer->handle_count++;
312         mutex_unlock(&buffer->lock);
313 }
314
315 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
316 {
317         /*
318          * When a buffer is removed from a handle and is not in any
319          * other handles, copy the taskcomm and the pid of the
320          * process it's being removed from into the buffer.  At this
321          * point there will be no way to track what processes this buffer is
322          * being used by; it only exists as a dma_buf file descriptor.
323          * The taskcomm and pid can provide a debug hint as to where this fd
324          * is in the system.
325          */
326         mutex_lock(&buffer->lock);
327         buffer->handle_count--;
328         BUG_ON(buffer->handle_count < 0);
329         if (!buffer->handle_count) {
330                 struct task_struct *task;
331
332                 task = current->group_leader;
333                 get_task_comm(buffer->task_comm, task);
334                 buffer->pid = task_pid_nr(task);
335         }
336         mutex_unlock(&buffer->lock);
337 }
338
339 static struct ion_handle *ion_handle_create(struct ion_client *client,
340                                      struct ion_buffer *buffer)
341 {
342         struct ion_handle *handle;
343
344         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
345         if (!handle)
346                 return ERR_PTR(-ENOMEM);
347         kref_init(&handle->ref);
348         RB_CLEAR_NODE(&handle->node);
349         handle->client = client;
350         ion_buffer_get(buffer);
351         ion_buffer_add_to_handle(buffer);
352         handle->buffer = buffer;
353
354         return handle;
355 }
356
357 static void ion_handle_kmap_put(struct ion_handle *);
358
359 static void ion_handle_destroy(struct kref *kref)
360 {
361         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
362         struct ion_client *client = handle->client;
363         struct ion_buffer *buffer = handle->buffer;
364
365         mutex_lock(&buffer->lock);
366         while (handle->kmap_cnt)
367                 ion_handle_kmap_put(handle);
368         mutex_unlock(&buffer->lock);
369
370         idr_remove(&client->idr, handle->id);
371         if (!RB_EMPTY_NODE(&handle->node))
372                 rb_erase(&handle->node, &client->handles);
373
374         ion_buffer_remove_from_handle(buffer);
375         ion_buffer_put(buffer);
376
377         kfree(handle);
378 }
379
380 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
381 {
382         return handle->buffer;
383 }
384
385 static void ion_handle_get(struct ion_handle *handle)
386 {
387         kref_get(&handle->ref);
388 }
389
390 static int ion_handle_put(struct ion_handle *handle)
391 {
392         struct ion_client *client = handle->client;
393         int ret;
394
395         mutex_lock(&client->lock);
396         ret = kref_put(&handle->ref, ion_handle_destroy);
397         mutex_unlock(&client->lock);
398
399         return ret;
400 }
401
402 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
403                                             struct ion_buffer *buffer)
404 {
405         struct rb_node *n = client->handles.rb_node;
406
407         while (n) {
408                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
409
410                 if (buffer < entry->buffer)
411                         n = n->rb_left;
412                 else if (buffer > entry->buffer)
413                         n = n->rb_right;
414                 else
415                         return entry;
416         }
417         return ERR_PTR(-EINVAL);
418 }
419
420 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
421                                                 int id)
422 {
423         struct ion_handle *handle;
424
425         mutex_lock(&client->lock);
426         handle = idr_find(&client->idr, id);
427         if (handle)
428                 ion_handle_get(handle);
429         mutex_unlock(&client->lock);
430
431         return handle ? handle : ERR_PTR(-EINVAL);
432 }
433
434 static bool ion_handle_validate(struct ion_client *client,
435                                 struct ion_handle *handle)
436 {
437         WARN_ON(!mutex_is_locked(&client->lock));
438         return idr_find(&client->idr, handle->id) == handle;
439 }
440
441 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
442 {
443         int id;
444         struct rb_node **p = &client->handles.rb_node;
445         struct rb_node *parent = NULL;
446         struct ion_handle *entry;
447
448         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
449         if (id < 0)
450                 return id;
451
452         handle->id = id;
453
454         while (*p) {
455                 parent = *p;
456                 entry = rb_entry(parent, struct ion_handle, node);
457
458                 if (handle->buffer < entry->buffer)
459                         p = &(*p)->rb_left;
460                 else if (handle->buffer > entry->buffer)
461                         p = &(*p)->rb_right;
462                 else
463                         WARN(1, "%s: buffer already found.\n", __func__);
464         }
465
466         rb_link_node(&handle->node, parent, p);
467         rb_insert_color(&handle->node, &client->handles);
468
469         return 0;
470 }
471
472 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
473                              size_t align, unsigned int heap_id_mask,
474                              unsigned int flags)
475 {
476         struct ion_handle *handle;
477         struct ion_device *dev = client->dev;
478         struct ion_buffer *buffer = NULL;
479         struct ion_heap *heap;
480         int ret;
481
482         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
483                  len, align, heap_id_mask, flags);
484         /*
485          * Traverse the list of heaps available in this system in priority
486          * order.  If the heap's id is in the caller's heap_id_mask, try to
487          * allocate from it.  Repeat until an allocation succeeds or all
488          * heaps have been tried.
489          */
490         len = PAGE_ALIGN(len);
491
492         if (!len)
493                 return ERR_PTR(-EINVAL);
494
495         down_read(&dev->lock);
496         plist_for_each_entry(heap, &dev->heaps, node) {
497                 /* if the caller didn't specify this heap id */
498                 if (!((1 << heap->id) & heap_id_mask))
499                         continue;
500                 buffer = ion_buffer_create(heap, dev, len, align, flags);
501                 if (!IS_ERR(buffer))
502                         break;
503         }
504         up_read(&dev->lock);
505
506         if (buffer == NULL)
507                 return ERR_PTR(-ENODEV);
508
509         if (IS_ERR(buffer))
510                 return ERR_CAST(buffer);
511
512         handle = ion_handle_create(client, buffer);
513
514         /*
515          * ion_buffer_create will create a buffer with a ref_cnt of 1,
516          * and ion_handle_create will take a second reference, drop one here
517          */
518         ion_buffer_put(buffer);
519
520         if (IS_ERR(handle))
521                 return handle;
522
523         mutex_lock(&client->lock);
524         ret = ion_handle_add(client, handle);
525         mutex_unlock(&client->lock);
526         if (ret) {
527                 ion_handle_put(handle);
528                 handle = ERR_PTR(ret);
529         }
530
531         return handle;
532 }
533 EXPORT_SYMBOL(ion_alloc);
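
/*
 * Example (illustrative sketch only, not part of the original driver): a
 * typical in-kernel allocation through the client API.  Error handling is
 * omitted, and "my_idev" plus the assumption that the platform registered a
 * system heap with id ION_HEAP_TYPE_SYSTEM are hypothetical.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(my_idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	// ... use vaddr ...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */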
534
535 void ion_free(struct ion_client *client, struct ion_handle *handle)
536 {
537         bool valid_handle;
538
539         BUG_ON(client != handle->client);
540
541         mutex_lock(&client->lock);
542         valid_handle = ion_handle_validate(client, handle);
543
544         if (!valid_handle) {
545                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
546                 mutex_unlock(&client->lock);
547                 return;
548         }
549         mutex_unlock(&client->lock);
550         ion_handle_put(handle);
551 }
552 EXPORT_SYMBOL(ion_free);
553
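/*
 * Note: only heaps that implement ->ops->phys (typically physically
 * contiguous heaps such as the carveout heap) can satisfy ion_phys();
 * everything else returns -ENODEV below.
 */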
554 int ion_phys(struct ion_client *client, struct ion_handle *handle,
555              ion_phys_addr_t *addr, size_t *len)
556 {
557         struct ion_buffer *buffer;
558         int ret;
559
560         mutex_lock(&client->lock);
561         if (!ion_handle_validate(client, handle)) {
562                 mutex_unlock(&client->lock);
563                 return -EINVAL;
564         }
565
566         buffer = handle->buffer;
567
568         if (!buffer->heap->ops->phys) {
569                 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
570                         __func__, buffer->heap->name, buffer->heap->type);
571                 mutex_unlock(&client->lock);
572                 return -ENODEV;
573         }
574         mutex_unlock(&client->lock);
575         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
576         return ret;
577 }
578 EXPORT_SYMBOL(ion_phys);
579
580 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
581 {
582         void *vaddr;
583
584         if (buffer->kmap_cnt) {
585                 buffer->kmap_cnt++;
586                 return buffer->vaddr;
587         }
588         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
589         if (WARN_ONCE(vaddr == NULL,
590                         "heap->ops->map_kernel should return ERR_PTR on error"))
591                 return ERR_PTR(-EINVAL);
592         if (IS_ERR(vaddr))
593                 return vaddr;
594         buffer->vaddr = vaddr;
595         buffer->kmap_cnt++;
596         return vaddr;
597 }
598
599 static void *ion_handle_kmap_get(struct ion_handle *handle)
600 {
601         struct ion_buffer *buffer = handle->buffer;
602         void *vaddr;
603
604         if (handle->kmap_cnt) {
605                 handle->kmap_cnt++;
606                 return buffer->vaddr;
607         }
608         vaddr = ion_buffer_kmap_get(buffer);
609         if (IS_ERR(vaddr))
610                 return vaddr;
611         handle->kmap_cnt++;
612         return vaddr;
613 }
614
615 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
616 {
617         buffer->kmap_cnt--;
618         if (!buffer->kmap_cnt) {
619                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
620                 buffer->vaddr = NULL;
621         }
622 }
623
624 static void ion_handle_kmap_put(struct ion_handle *handle)
625 {
626         struct ion_buffer *buffer = handle->buffer;
627
628         if (!handle->kmap_cnt) {
629                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
630                 return;
631         }
632         handle->kmap_cnt--;
633         if (!handle->kmap_cnt)
634                 ion_buffer_kmap_put(buffer);
635 }
636
637 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
638 {
639         struct ion_buffer *buffer;
640         void *vaddr;
641
642         mutex_lock(&client->lock);
643         if (!ion_handle_validate(client, handle)) {
644                 pr_err("%s: invalid handle passed to map_kernel.\n",
645                        __func__);
646                 mutex_unlock(&client->lock);
647                 return ERR_PTR(-EINVAL);
648         }
649
650         buffer = handle->buffer;
651
652         if (!handle->buffer->heap->ops->map_kernel) {
653                 pr_err("%s: map_kernel is not implemented by this heap.\n",
654                        __func__);
655                 mutex_unlock(&client->lock);
656                 return ERR_PTR(-ENODEV);
657         }
658
659         mutex_lock(&buffer->lock);
660         vaddr = ion_handle_kmap_get(handle);
661         mutex_unlock(&buffer->lock);
662         mutex_unlock(&client->lock);
663         return vaddr;
664 }
665 EXPORT_SYMBOL(ion_map_kernel);
666
667 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
668 {
669         struct ion_buffer *buffer;
670
671         mutex_lock(&client->lock);
672         buffer = handle->buffer;
673         mutex_lock(&buffer->lock);
674         ion_handle_kmap_put(handle);
675         mutex_unlock(&buffer->lock);
676         mutex_unlock(&client->lock);
677 }
678 EXPORT_SYMBOL(ion_unmap_kernel);
679
680 static int ion_debug_client_show(struct seq_file *s, void *unused)
681 {
682         struct ion_client *client = s->private;
683         struct rb_node *n;
684         size_t sizes[ION_NUM_HEAP_IDS] = {0};
685         const char *names[ION_NUM_HEAP_IDS] = {NULL};
686         int i;
687
688         mutex_lock(&client->lock);
689         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
690                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
691                                                      node);
692                 unsigned int id = handle->buffer->heap->id;
693
694                 if (!names[id])
695                         names[id] = handle->buffer->heap->name;
696                 sizes[id] += handle->buffer->size;
697         }
698         mutex_unlock(&client->lock);
699
700         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
701         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
702                 if (!names[i])
703                         continue;
704                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
705         }
706         return 0;
707 }
708
709 static int ion_debug_client_open(struct inode *inode, struct file *file)
710 {
711         return single_open(file, ion_debug_client_show, inode->i_private);
712 }
713
714 static const struct file_operations debug_client_fops = {
715         .open = ion_debug_client_open,
716         .read = seq_read,
717         .llseek = seq_lseek,
718         .release = single_release,
719 };
720
721 static int ion_get_client_serial(const struct rb_root *root,
722                                         const unsigned char *name)
723 {
724         int serial = -1;
725         struct rb_node *node;
726
727         for (node = rb_first(root); node; node = rb_next(node)) {
728                 struct ion_client *client = rb_entry(node, struct ion_client,
729                                                 node);
730
731                 if (strcmp(client->name, name))
732                         continue;
733                 serial = max(serial, client->display_serial);
734         }
735         return serial + 1;
736 }
737
738 struct ion_client *ion_client_create(struct ion_device *dev,
739                                      const char *name)
740 {
741         struct ion_client *client;
742         struct task_struct *task;
743         struct rb_node **p;
744         struct rb_node *parent = NULL;
745         struct ion_client *entry;
746         pid_t pid;
747
748         if (!name) {
749                 pr_err("%s: Name cannot be null\n", __func__);
750                 return ERR_PTR(-EINVAL);
751         }
752
753         get_task_struct(current->group_leader);
754         task_lock(current->group_leader);
755         pid = task_pid_nr(current->group_leader);
756         /*
757          * don't bother to store task struct for kernel threads,
758          * they can't be killed anyway
759          */
760         if (current->group_leader->flags & PF_KTHREAD) {
761                 put_task_struct(current->group_leader);
762                 task = NULL;
763         } else {
764                 task = current->group_leader;
765         }
766         task_unlock(current->group_leader);
767
768         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
769         if (!client)
770                 goto err_put_task_struct;
771
772         client->dev = dev;
773         client->handles = RB_ROOT;
774         idr_init(&client->idr);
775         mutex_init(&client->lock);
776         client->task = task;
777         client->pid = pid;
778         client->name = kstrdup(name, GFP_KERNEL);
779         if (!client->name)
780                 goto err_free_client;
781
782         down_write(&dev->lock);
783         client->display_serial = ion_get_client_serial(&dev->clients, name);
784         client->display_name = kasprintf(
785                 GFP_KERNEL, "%s-%d", name, client->display_serial);
786         if (!client->display_name) {
787                 up_write(&dev->lock);
788                 goto err_free_client_name;
789         }
790         p = &dev->clients.rb_node;
791         while (*p) {
792                 parent = *p;
793                 entry = rb_entry(parent, struct ion_client, node);
794
795                 if (client < entry)
796                         p = &(*p)->rb_left;
797                 else if (client > entry)
798                         p = &(*p)->rb_right;
799         }
800         rb_link_node(&client->node, parent, p);
801         rb_insert_color(&client->node, &dev->clients);
802
803         client->debug_root = debugfs_create_file(client->display_name, 0664,
804                                                 dev->clients_debug_root,
805                                                 client, &debug_client_fops);
806         if (!client->debug_root) {
807                 char buf[256], *path;
808
809                 path = dentry_path(dev->clients_debug_root, buf, 256);
810                 pr_err("Failed to create client debugfs at %s/%s\n",
811                         path, client->display_name);
812         }
813
814         up_write(&dev->lock);
815
816         return client;
817
818 err_free_client_name:
819         kfree(client->name);
820 err_free_client:
821         kfree(client);
822 err_put_task_struct:
823         if (task)
824                 put_task_struct(current->group_leader);
825         return ERR_PTR(-ENOMEM);
826 }
827 EXPORT_SYMBOL(ion_client_create);
828
829 void ion_client_destroy(struct ion_client *client)
830 {
831         struct ion_device *dev = client->dev;
832         struct rb_node *n;
833
834         pr_debug("%s: %d\n", __func__, __LINE__);
835         while ((n = rb_first(&client->handles))) {
836                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
837                                                      node);
838                 ion_handle_destroy(&handle->ref);
839         }
840
841         idr_destroy(&client->idr);
842
843         down_write(&dev->lock);
844         if (client->task)
845                 put_task_struct(client->task);
846         rb_erase(&client->node, &dev->clients);
847         debugfs_remove_recursive(client->debug_root);
848         up_write(&dev->lock);
849
850         kfree(client->display_name);
851         kfree(client->name);
852         kfree(client);
853 }
854 EXPORT_SYMBOL(ion_client_destroy);
855
856 struct sg_table *ion_sg_table(struct ion_client *client,
857                               struct ion_handle *handle)
858 {
859         struct ion_buffer *buffer;
860         struct sg_table *table;
861
862         mutex_lock(&client->lock);
863         if (!ion_handle_validate(client, handle)) {
864                 pr_err("%s: invalid handle passed to map_dma.\n",
865                        __func__);
866                 mutex_unlock(&client->lock);
867                 return ERR_PTR(-EINVAL);
868         }
869         buffer = handle->buffer;
870         table = buffer->sg_table;
871         mutex_unlock(&client->lock);
872         return table;
873 }
874 EXPORT_SYMBOL(ion_sg_table);
875
876 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
877                                        struct device *dev,
878                                        enum dma_data_direction direction);
879
880 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
881                                         enum dma_data_direction direction)
882 {
883         struct dma_buf *dmabuf = attachment->dmabuf;
884         struct ion_buffer *buffer = dmabuf->priv;
885
886         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
887         return buffer->sg_table;
888 }
889
890 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
891                               struct sg_table *table,
892                               enum dma_data_direction direction)
893 {
894 }
895
896 void ion_pages_sync_for_device(struct device *dev, struct page *page,
897                 size_t size, enum dma_data_direction dir)
898 {
899         struct scatterlist sg;
900
901         sg_init_table(&sg, 1);
902         sg_set_page(&sg, page, size, 0);
903         /*
904          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
905          * for the targeted device, but this works on the currently targeted
906          * hardware.
907          */
908         sg_dma_address(&sg) = page_to_phys(page);
909         dma_sync_sg_for_device(dev, &sg, 1, dir);
910 }
911
912 struct ion_vma_list {
913         struct list_head list;
914         struct vm_area_struct *vma;
915 };
916
917 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
918                                        struct device *dev,
919                                        enum dma_data_direction dir)
920 {
921         struct ion_vma_list *vma_list;
922         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
923         int i;
924
925         pr_debug("%s: syncing for device %s\n", __func__,
926                  dev ? dev_name(dev) : "null");
927
928         if (!ion_buffer_fault_user_mappings(buffer))
929                 return;
930
931         mutex_lock(&buffer->lock);
932         for (i = 0; i < pages; i++) {
933                 struct page *page = buffer->pages[i];
934
935                 if (ion_buffer_page_is_dirty(page))
936                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
937                                                         PAGE_SIZE, dir);
938
939                 ion_buffer_page_clean(buffer->pages + i);
940         }
941         list_for_each_entry(vma_list, &buffer->vmas, list) {
942                 struct vm_area_struct *vma = vma_list->vma;
943
944                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
945                                NULL);
946         }
947         mutex_unlock(&buffer->lock);
948 }
949
950 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
951 {
952         struct ion_buffer *buffer = vma->vm_private_data;
953         unsigned long pfn;
954         int ret;
955
956         mutex_lock(&buffer->lock);
957         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
958         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
959
960         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
961         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
962         mutex_unlock(&buffer->lock);
963         if (ret)
964                 return VM_FAULT_ERROR;
965
966         return VM_FAULT_NOPAGE;
967 }
968
969 static void ion_vm_open(struct vm_area_struct *vma)
970 {
971         struct ion_buffer *buffer = vma->vm_private_data;
972         struct ion_vma_list *vma_list;
973
974         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
975         if (!vma_list)
976                 return;
977         vma_list->vma = vma;
978         mutex_lock(&buffer->lock);
979         list_add(&vma_list->list, &buffer->vmas);
980         mutex_unlock(&buffer->lock);
981         pr_debug("%s: adding %p\n", __func__, vma);
982 }
983
984 static void ion_vm_close(struct vm_area_struct *vma)
985 {
986         struct ion_buffer *buffer = vma->vm_private_data;
987         struct ion_vma_list *vma_list, *tmp;
988
989         pr_debug("%s\n", __func__);
990         mutex_lock(&buffer->lock);
991         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
992                 if (vma_list->vma != vma)
993                         continue;
994                 list_del(&vma_list->list);
995                 kfree(vma_list);
996                 pr_debug("%s: deleting %p\n", __func__, vma);
997                 break;
998         }
999         mutex_unlock(&buffer->lock);
1000 }
1001
1002 static const struct vm_operations_struct ion_vma_ops = {
1003         .open = ion_vm_open,
1004         .close = ion_vm_close,
1005         .fault = ion_vm_fault,
1006 };
1007
1008 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1009 {
1010         struct ion_buffer *buffer = dmabuf->priv;
1011         int ret = 0;
1012
1013         if (!buffer->heap->ops->map_user) {
1014                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1015                         __func__);
1016                 return -EINVAL;
1017         }
1018
1019         if (ion_buffer_fault_user_mappings(buffer)) {
1020                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1021                                                         VM_DONTDUMP;
1022                 vma->vm_private_data = buffer;
1023                 vma->vm_ops = &ion_vma_ops;
1024                 ion_vm_open(vma);
1025                 return 0;
1026         }
1027
1028         if (!(buffer->flags & ION_FLAG_CACHED))
1029                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1030
1031         mutex_lock(&buffer->lock);
1032         /* now map it to userspace */
1033         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1034         mutex_unlock(&buffer->lock);
1035
1036         if (ret)
1037                 pr_err("%s: failure mapping buffer to userspace\n",
1038                        __func__);
1039
1040         return ret;
1041 }
1042
1043 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1044 {
1045         struct ion_buffer *buffer = dmabuf->priv;
1046
1047         ion_buffer_put(buffer);
1048 }
1049
1050 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1051 {
1052         struct ion_buffer *buffer = dmabuf->priv;
1053
1054         return buffer->vaddr + offset * PAGE_SIZE;
1055 }
1056
1057 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1058                                void *ptr)
1059 {
1060 }
1061
1062 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1063                                         size_t len,
1064                                         enum dma_data_direction direction)
1065 {
1066         struct ion_buffer *buffer = dmabuf->priv;
1067         void *vaddr;
1068
1069         if (!buffer->heap->ops->map_kernel) {
1070                 pr_err("%s: map kernel is not implemented by this heap.\n",
1071                        __func__);
1072                 return -ENODEV;
1073         }
1074
1075         mutex_lock(&buffer->lock);
1076         vaddr = ion_buffer_kmap_get(buffer);
1077         mutex_unlock(&buffer->lock);
1078         return PTR_ERR_OR_ZERO(vaddr);
1079 }
1080
1081 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1082                                        size_t len,
1083                                        enum dma_data_direction direction)
1084 {
1085         struct ion_buffer *buffer = dmabuf->priv;
1086
1087         mutex_lock(&buffer->lock);
1088         ion_buffer_kmap_put(buffer);
1089         mutex_unlock(&buffer->lock);
1090 }
1091
1092 static struct dma_buf_ops dma_buf_ops = {
1093         .map_dma_buf = ion_map_dma_buf,
1094         .unmap_dma_buf = ion_unmap_dma_buf,
1095         .mmap = ion_mmap,
1096         .release = ion_dma_buf_release,
1097         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1098         .end_cpu_access = ion_dma_buf_end_cpu_access,
1099         .kmap_atomic = ion_dma_buf_kmap,
1100         .kunmap_atomic = ion_dma_buf_kunmap,
1101         .kmap = ion_dma_buf_kmap,
1102         .kunmap = ion_dma_buf_kunmap,
1103 };
1104
1105 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1106                                                 struct ion_handle *handle)
1107 {
1108         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1109         struct ion_buffer *buffer;
1110         struct dma_buf *dmabuf;
1111         bool valid_handle;
1112
1113         mutex_lock(&client->lock);
1114         valid_handle = ion_handle_validate(client, handle);
1115         if (!valid_handle) {
1116                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1117                 mutex_unlock(&client->lock);
1118                 return ERR_PTR(-EINVAL);
1119         }
1120         buffer = handle->buffer;
1121         ion_buffer_get(buffer);
1122         mutex_unlock(&client->lock);
1123
1124         exp_info.ops = &dma_buf_ops;
1125         exp_info.size = buffer->size;
1126         exp_info.flags = O_RDWR;
1127         exp_info.priv = buffer;
1128
1129         dmabuf = dma_buf_export(&exp_info);
1130         if (IS_ERR(dmabuf)) {
1131                 ion_buffer_put(buffer);
1132                 return dmabuf;
1133         }
1134
1135         return dmabuf;
1136 }
1137 EXPORT_SYMBOL(ion_share_dma_buf);
1138
1139 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1140 {
1141         struct dma_buf *dmabuf;
1142         int fd;
1143
1144         dmabuf = ion_share_dma_buf(client, handle);
1145         if (IS_ERR(dmabuf))
1146                 return PTR_ERR(dmabuf);
1147
1148         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1149         if (fd < 0)
1150                 dma_buf_put(dmabuf);
1151
1152         return fd;
1153 }
1154 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1155
1156 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1157 {
1158         struct dma_buf *dmabuf;
1159         struct ion_buffer *buffer;
1160         struct ion_handle *handle;
1161         int ret;
1162
1163         dmabuf = dma_buf_get(fd);
1164         if (IS_ERR(dmabuf))
1165                 return ERR_CAST(dmabuf);
1166         /* if this memory came from ion */
1167
1168         if (dmabuf->ops != &dma_buf_ops) {
1169                 pr_err("%s: can not import dmabuf from another exporter\n",
1170                        __func__);
1171                 dma_buf_put(dmabuf);
1172                 return ERR_PTR(-EINVAL);
1173         }
1174         buffer = dmabuf->priv;
1175
1176         mutex_lock(&client->lock);
1177         /* if a handle exists for this buffer just take a reference to it */
1178         handle = ion_handle_lookup(client, buffer);
1179         if (!IS_ERR(handle)) {
1180                 ion_handle_get(handle);
1181                 mutex_unlock(&client->lock);
1182                 goto end;
1183         }
1184
1185         handle = ion_handle_create(client, buffer);
1186         if (IS_ERR(handle)) {
1187                 mutex_unlock(&client->lock);
1188                 goto end;
1189         }
1190
1191         ret = ion_handle_add(client, handle);
1192         mutex_unlock(&client->lock);
1193         if (ret) {
1194                 ion_handle_put(handle);
1195                 handle = ERR_PTR(ret);
1196         }
1197
1198 end:
1199         dma_buf_put(dmabuf);
1200         return handle;
1201 }
1202 EXPORT_SYMBOL(ion_import_dma_buf);
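
/*
 * Example (sketch, with error handling omitted): round-tripping a buffer
 * through a dma-buf fd, e.g. to hand it to another process or subsystem.
 * "client_a", "client_b" and the handle are assumed to exist already.
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle);
 *	// pass fd to the other side (binder, socket, ...)
 *	struct ion_handle *imported = ion_import_dma_buf(client_b, fd);
 *	// imported now behaves like a handle obtained from ion_alloc()
 */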
1203
1204 static int ion_sync_for_device(struct ion_client *client, int fd)
1205 {
1206         struct dma_buf *dmabuf;
1207         struct ion_buffer *buffer;
1208
1209         dmabuf = dma_buf_get(fd);
1210         if (IS_ERR(dmabuf))
1211                 return PTR_ERR(dmabuf);
1212
1213         /* if this memory came from ion */
1214         if (dmabuf->ops != &dma_buf_ops) {
1215                 pr_err("%s: can not sync dmabuf from another exporter\n",
1216                        __func__);
1217                 dma_buf_put(dmabuf);
1218                 return -EINVAL;
1219         }
1220         buffer = dmabuf->priv;
1221
1222         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1223                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1224         dma_buf_put(dmabuf);
1225         return 0;
1226 }
1227
1228 /* fix up the cases where the ioctl direction bits are incorrect */
1229 static unsigned int ion_ioctl_dir(unsigned int cmd)
1230 {
1231         switch (cmd) {
1232         case ION_IOC_SYNC:
1233         case ION_IOC_FREE:
1234         case ION_IOC_CUSTOM:
1235                 return _IOC_WRITE;
1236         default:
1237                 return _IOC_DIR(cmd);
1238         }
1239 }
1240
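/*
 * Userspace view (sketch, error checking omitted): the same flow the handler
 * below implements, driven through /dev/ion.  "my_heap_id" is a placeholder
 * for whatever heap id the platform registered.
 *
 *	int ionfd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.heap_id_mask = 1 << my_heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data to_free;
 *
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_SHARE, &share);
 *	// share.fd is a dma-buf fd: mmap() it or pass it to another process
 *	to_free.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_FREE, &to_free);
 */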
1241 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1242 {
1243         struct ion_client *client = filp->private_data;
1244         struct ion_device *dev = client->dev;
1245         struct ion_handle *cleanup_handle = NULL;
1246         int ret = 0;
1247         unsigned int dir;
1248
1249         union {
1250                 struct ion_fd_data fd;
1251                 struct ion_allocation_data allocation;
1252                 struct ion_handle_data handle;
1253                 struct ion_custom_data custom;
1254         } data;
1255
1256         dir = ion_ioctl_dir(cmd);
1257
1258         if (_IOC_SIZE(cmd) > sizeof(data))
1259                 return -EINVAL;
1260
1261         if (dir & _IOC_WRITE)
1262                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1263                         return -EFAULT;
1264
1265         switch (cmd) {
1266         case ION_IOC_ALLOC:
1267         {
1268                 struct ion_handle *handle;
1269
1270                 handle = ion_alloc(client, data.allocation.len,
1271                                                 data.allocation.align,
1272                                                 data.allocation.heap_id_mask,
1273                                                 data.allocation.flags);
1274                 if (IS_ERR(handle))
1275                         return PTR_ERR(handle);
1276
1277                 data.allocation.handle = handle->id;
1278
1279                 cleanup_handle = handle;
1280                 break;
1281         }
1282         case ION_IOC_FREE:
1283         {
1284                 struct ion_handle *handle;
1285
1286                 handle = ion_handle_get_by_id(client, data.handle.handle);
1287                 if (IS_ERR(handle))
1288                         return PTR_ERR(handle);
1289                 ion_free(client, handle);
1290                 ion_handle_put(handle);
1291                 break;
1292         }
1293         case ION_IOC_SHARE:
1294         case ION_IOC_MAP:
1295         {
1296                 struct ion_handle *handle;
1297
1298                 handle = ion_handle_get_by_id(client, data.handle.handle);
1299                 if (IS_ERR(handle))
1300                         return PTR_ERR(handle);
1301                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1302                 ion_handle_put(handle);
1303                 if (data.fd.fd < 0)
1304                         ret = data.fd.fd;
1305                 break;
1306         }
1307         case ION_IOC_IMPORT:
1308         {
1309                 struct ion_handle *handle;
1310
1311                 handle = ion_import_dma_buf(client, data.fd.fd);
1312                 if (IS_ERR(handle))
1313                         ret = PTR_ERR(handle);
1314                 else
1315                         data.handle.handle = handle->id;
1316                 break;
1317         }
1318         case ION_IOC_SYNC:
1319         {
1320                 ret = ion_sync_for_device(client, data.fd.fd);
1321                 break;
1322         }
1323         case ION_IOC_CUSTOM:
1324         {
1325                 if (!dev->custom_ioctl)
1326                         return -ENOTTY;
1327                 ret = dev->custom_ioctl(client, data.custom.cmd,
1328                                                 data.custom.arg);
1329                 break;
1330         }
1331         default:
1332                 return -ENOTTY;
1333         }
1334
1335         if (dir & _IOC_READ) {
1336                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1337                         if (cleanup_handle)
1338                                 ion_free(client, cleanup_handle);
1339                         return -EFAULT;
1340                 }
1341         }
1342         return ret;
1343 }
1344
1345 static int ion_release(struct inode *inode, struct file *file)
1346 {
1347         struct ion_client *client = file->private_data;
1348
1349         pr_debug("%s: %d\n", __func__, __LINE__);
1350         ion_client_destroy(client);
1351         return 0;
1352 }
1353
1354 static int ion_open(struct inode *inode, struct file *file)
1355 {
1356         struct miscdevice *miscdev = file->private_data;
1357         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1358         struct ion_client *client;
1359         char debug_name[64];
1360
1361         pr_debug("%s: %d\n", __func__, __LINE__);
1362         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1363         client = ion_client_create(dev, debug_name);
1364         if (IS_ERR(client))
1365                 return PTR_ERR(client);
1366         file->private_data = client;
1367
1368         return 0;
1369 }
1370
1371 static const struct file_operations ion_fops = {
1372         .owner          = THIS_MODULE,
1373         .open           = ion_open,
1374         .release        = ion_release,
1375         .unlocked_ioctl = ion_ioctl,
1376         .compat_ioctl   = compat_ion_ioctl,
1377 };
1378
1379 static size_t ion_debug_heap_total(struct ion_client *client,
1380                                    unsigned int id)
1381 {
1382         size_t size = 0;
1383         struct rb_node *n;
1384
1385         mutex_lock(&client->lock);
1386         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1387                 struct ion_handle *handle = rb_entry(n,
1388                                                      struct ion_handle,
1389                                                      node);
1390                 if (handle->buffer->heap->id == id)
1391                         size += handle->buffer->size;
1392         }
1393         mutex_unlock(&client->lock);
1394         return size;
1395 }
1396
1397 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1398 {
1399         struct ion_heap *heap = s->private;
1400         struct ion_device *dev = heap->dev;
1401         struct rb_node *n;
1402         size_t total_size = 0;
1403         size_t total_orphaned_size = 0;
1404
1405         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1406         seq_puts(s, "----------------------------------------------------\n");
1407
1408         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1409                 struct ion_client *client = rb_entry(n, struct ion_client,
1410                                                      node);
1411                 size_t size = ion_debug_heap_total(client, heap->id);
1412
1413                 if (!size)
1414                         continue;
1415                 if (client->task) {
1416                         char task_comm[TASK_COMM_LEN];
1417
1418                         get_task_comm(task_comm, client->task);
1419                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1420                                    client->pid, size);
1421                 } else {
1422                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1423                                    client->pid, size);
1424                 }
1425         }
1426         seq_puts(s, "----------------------------------------------------\n");
1427         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1428         mutex_lock(&dev->buffer_lock);
1429         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1430                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1431                                                      node);
1432                 if (buffer->heap->id != heap->id)
1433                         continue;
1434                 total_size += buffer->size;
1435                 if (!buffer->handle_count) {
1436                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1437                                    buffer->task_comm, buffer->pid,
1438                                    buffer->size, buffer->kmap_cnt,
1439                                    atomic_read(&buffer->ref.refcount));
1440                         total_orphaned_size += buffer->size;
1441                 }
1442         }
1443         mutex_unlock(&dev->buffer_lock);
1444         seq_puts(s, "----------------------------------------------------\n");
1445         seq_printf(s, "%16s %16zu\n", "total orphaned",
1446                    total_orphaned_size);
1447         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1448         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1449                 seq_printf(s, "%16s %16zu\n", "deferred free",
1450                                 heap->free_list_size);
1451         seq_puts(s, "----------------------------------------------------\n");
1452
1453         if (heap->debug_show)
1454                 heap->debug_show(heap, s, unused);
1455
1456         return 0;
1457 }
1458
1459 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1460 {
1461         return single_open(file, ion_debug_heap_show, inode->i_private);
1462 }
1463
1464 static const struct file_operations debug_heap_fops = {
1465         .open = ion_debug_heap_open,
1466         .read = seq_read,
1467         .llseek = seq_lseek,
1468         .release = single_release,
1469 };
1470
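/*
 * Backing for the per-heap "<name>_shrink" debugfs file created in
 * ion_device_add_heap(): reading it reports how many objects the heap's
 * shrinker currently holds, writing N scans (frees) up to N objects, and
 * writing 0 drains everything by scanning the full count.
 */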
1471 static int debug_shrink_set(void *data, u64 val)
1472 {
1473         struct ion_heap *heap = data;
1474         struct shrink_control sc;
1475         int objs;
1476
1477         sc.gfp_mask = -1;
1478         sc.nr_to_scan = val;
1479
1480         if (!val) {
1481                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1482                 sc.nr_to_scan = objs;
1483         }
1484
1485         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1486         return 0;
1487 }
1488
1489 static int debug_shrink_get(void *data, u64 *val)
1490 {
1491         struct ion_heap *heap = data;
1492         struct shrink_control sc;
1493         int objs;
1494
1495         sc.gfp_mask = -1;
1496         sc.nr_to_scan = 0;
1497
1498         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1499         *val = objs;
1500         return 0;
1501 }
1502
1503 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1504                         debug_shrink_set, "%llu\n");
1505
1506 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1507 {
1508         struct dentry *debug_file;
1509
1510         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1511             !heap->ops->unmap_dma)
1512                 pr_err("%s: can not add heap with invalid ops struct.\n",
1513                        __func__);
1514
1515         spin_lock_init(&heap->free_lock);
1516         heap->free_list_size = 0;
1517
1518         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1519                 ion_heap_init_deferred_free(heap);
1520
1521         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1522                 ion_heap_init_shrinker(heap);
1523
1524         heap->dev = dev;
1525         down_write(&dev->lock);
1526         /*
1527          * use negative heap->id to reverse the priority -- when traversing
1528          * the list later attempt higher id numbers first
1529          */
1530         plist_node_init(&heap->node, -heap->id);
1531         plist_add(&heap->node, &dev->heaps);
1532         debug_file = debugfs_create_file(heap->name, 0664,
1533                                         dev->heaps_debug_root, heap,
1534                                         &debug_heap_fops);
1535
1536         if (!debug_file) {
1537                 char buf[256], *path;
1538
1539                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1540                 pr_err("Failed to create heap debugfs at %s/%s\n",
1541                         path, heap->name);
1542         }
1543
1544         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1545                 char debug_name[64];
1546
1547                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1548                 debug_file = debugfs_create_file(
1549                         debug_name, 0644, dev->heaps_debug_root, heap,
1550                         &debug_shrink_fops);
1551                 if (!debug_file) {
1552                         char buf[256], *path;
1553
1554                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1555                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1556                                 path, debug_name);
1557                 }
1558         }
1559
1560         up_write(&dev->lock);
1561 }
1562 EXPORT_SYMBOL(ion_device_add_heap);
1563
1564 struct ion_device *ion_device_create(long (*custom_ioctl)
1565                                      (struct ion_client *client,
1566                                       unsigned int cmd,
1567                                       unsigned long arg))
1568 {
1569         struct ion_device *idev;
1570         int ret;
1571
1572         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1573         if (!idev)
1574                 return ERR_PTR(-ENOMEM);
1575
1576         idev->dev.minor = MISC_DYNAMIC_MINOR;
1577         idev->dev.name = "ion";
1578         idev->dev.fops = &ion_fops;
1579         idev->dev.parent = NULL;
1580         ret = misc_register(&idev->dev);
1581         if (ret) {
1582                 pr_err("ion: failed to register misc device.\n");
1583                 kfree(idev);
1584                 return ERR_PTR(ret);
1585         }
1586
1587         idev->debug_root = debugfs_create_dir("ion", NULL);
1588         if (!idev->debug_root) {
1589                 pr_err("ion: failed to create debugfs root directory.\n");
1590                 goto debugfs_done;
1591         }
1592         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1593         if (!idev->heaps_debug_root) {
1594                 pr_err("ion: failed to create debugfs heaps directory.\n");
1595                 goto debugfs_done;
1596         }
1597         idev->clients_debug_root = debugfs_create_dir("clients",
1598                                                 idev->debug_root);
1599         if (!idev->clients_debug_root)
1600                 pr_err("ion: failed to create debugfs clients directory.\n");
1601
1602 debugfs_done:
1603
1604         idev->custom_ioctl = custom_ioctl;
1605         idev->buffers = RB_ROOT;
1606         mutex_init(&idev->buffer_lock);
1607         init_rwsem(&idev->lock);
1608         plist_head_init(&idev->heaps);
1609         idev->clients = RB_ROOT;
1610         return idev;
1611 }
1612 EXPORT_SYMBOL(ion_device_create);
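
/*
 * Example (sketch): how a platform/vendor driver is expected to bring ion up,
 * similar to the in-tree dummy driver.  "pdata" is an ion_platform_data
 * describing the board's heaps, and error handling is omitted.
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	int i;
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		ion_device_add_heap(idev, heap);
 *	}
 */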
1613
1614 void ion_device_destroy(struct ion_device *dev)
1615 {
1616         misc_deregister(&dev->dev);
1617         debugfs_remove_recursive(dev->debug_root);
1618         /* XXX need to free the heaps and clients ? */
1619         kfree(dev);
1620 }
1621 EXPORT_SYMBOL(ion_device_destroy);
1622
1623 void __init ion_reserve(struct ion_platform_data *data)
1624 {
1625         int i;
1626
1627         for (i = 0; i < data->nr; i++) {
1628                 if (data->heaps[i].size == 0)
1629                         continue;
1630
1631                 if (data->heaps[i].base == 0) {
1632                         phys_addr_t paddr;
1633
1634                         paddr = memblock_alloc_base(data->heaps[i].size,
1635                                                     data->heaps[i].align,
1636                                                     MEMBLOCK_ALLOC_ANYWHERE);
1637                         if (!paddr) {
1638                                 pr_err("%s: error allocating memblock for heap %d\n",
1639                                         __func__, i);
1640                                 continue;
1641                         }
1642                         data->heaps[i].base = paddr;
1643                 } else {
1644                         int ret = memblock_reserve(data->heaps[i].base,
1645                                                data->heaps[i].size);
1646                         if (ret)
1647                                 pr_err("memblock reserve of %zx@%lx failed\n",
1648                                        data->heaps[i].size,
1649                                        data->heaps[i].base);
1650                 }
1651                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1652                         data->heaps[i].name,
1653                         data->heaps[i].base,
1654                         data->heaps[i].size);
1655         }
1656 }