/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable mapping by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	/* TODO */

	return roundup(size, PAGE_SIZE);
}

static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	unsigned long pfn;
	int i;

	if (!buf->sgt)
		return -EINTR;

	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return -EINVAL;
	}

	sgl = buf->sgt->sgl;
	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
		if (page_offset < (sgl->length >> PAGE_SHIFT))
			break;
		page_offset -= (sgl->length >> PAGE_SHIFT);
	}

	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered
	 * and return that id as the handle userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region of an imported buffer here.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->buffer->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return NULL;

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0)
		goto err_gem_fini;

	return exynos_gem_obj;

err_gem_fini:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

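/*
 * Illustrative sketch, not part of the driver: roughly how a userspace
 * client might reach exynos_drm_gem_create_ioctl() through libdrm.
 * Struct and macro names follow include/uapi/drm/exynos_drm.h; "fd" is
 * an already-open DRM device fd and use_handle() is a hypothetical
 * helper, and error handling is omitted.
 *
 *	#include <xf86drm.h>
 *	#include <drm/exynos_drm.h>
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_handle(req.handle);		// GEM handle filled in by the driver
 */
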
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

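/*
 * Illustrative sketch, not part of the driver: the get/put pairing the
 * comment above relies on, as seen from a kernel-side consumer (another
 * exynos_drm component). "drm_dev", "gem_handle", "file_priv" and
 * program_hw_with() are placeholders; error handling is minimal.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_handle, file_priv);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	program_hw_with(*addr);		// hypothetical hardware setup
 *
 *	exynos_drm_gem_put_dma_addr(drm_dev, gem_handle, file_priv);
 */
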
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes the physically contiguous memory
	 * allocated at user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region was mmapped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		if (i != npages) {
			DRM_ERROR("failed to get user_pages.\n");
			return -EINVAL;
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return nents;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is invoked by userspace via the
	 *	DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev)) {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
			args->size);
	} else {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
			args->size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the fake mmap offset of the memory allocated for the
	 * drm framebuffer.
	 * - this callback is invoked by userspace via the
	 *	DRM_IOCTL_MODE_MAP_DUMB ioctl.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

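/*
 * Illustrative sketch, not part of the driver: the usual dumb-buffer flow
 * from userspace that lands in the two callbacks above and then in
 * exynos_drm_gem_mmap() below. Struct and ioctl names come from the
 * generic DRM UAPI; "fd" is an open DRM device fd and error handling is
 * omitted.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */
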
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map a buffer with user.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret)
		goto err_close_vm;

	update_vm_cache_attr(exynos_gem_obj, vma);

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	drm_gem_free_mmap_offset(obj);

	return ret;
}