kernel/drivers/gpu/drm/udl/udl_dmabuf.c
/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

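/*
 * Per-attachment bookkeeping: @sgt caches a copy of the exporter's
 * scatter-gather table, @dir records the direction it was last mapped
 * in (DMA_NONE until the first map), and @is_mapped marks whether the
 * cached table is currently valid for @dir.
 */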
struct udl_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool is_mapped;
};

static int udl_attach_dma_buf(struct dma_buf *dmabuf,
                              struct device *dev,
                              struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
        if (!udl_attach)
                return -ENOMEM;

        udl_attach->dir = DMA_NONE;
        attach->priv = udl_attach;

        return 0;
}
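
/*
 * The attach callback only sets up bookkeeping; building and mapping the
 * scatterlist is deferred to udl_map_dma_buf().  An importer normally
 * reaches these hooks through the dma-buf core.  A minimal sketch,
 * assuming a valid exported @buf and an importing @importer_dev:
 *
 *	struct dma_buf_attachment *a = dma_buf_attach(buf, importer_dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(a, DMA_BIDIRECTIONAL);
 *	// ... use sgt for DMA ...
 *	dma_buf_unmap_attachment(a, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, a);
 */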

static void udl_detach_dma_buf(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct sg_table *sgt;

        if (!udl_attach)
                return;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        sgt = &udl_attach->sgt;

        if (udl_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                udl_attach->dir);

        sg_free_table(sgt);
        kfree(udl_attach);
        attach->priv = NULL;
}
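
/*
 * Note the asymmetry: udl_map_dma_buf() caches its result and
 * udl_unmap_dma_buf() below is a no-op, so this detach callback is the
 * single place where the cached sg_table is actually unmapped and freed.
 */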

static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
        struct drm_device *dev = obj->base.dev;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
        int page_count;
        int nents, ret;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);

        /* just return current sgt if already requested. */
        if (udl_attach->dir == dir && udl_attach->is_mapped)
                return &udl_attach->sgt;

        if (!obj->pages) {
                ret = udl_gem_get_pages(obj);
                if (ret) {
                        DRM_ERROR("failed to map pages.\n");
                        return ERR_PTR(ret);
                }
        }

        page_count = obj->base.size / PAGE_SIZE;
        obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
        if (IS_ERR(obj->sg)) {
                DRM_ERROR("failed to allocate sgt.\n");
                return ERR_CAST(obj->sg);
        }

        sgt = &udl_attach->sgt;

        ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
        if (ret) {
                DRM_ERROR("failed to alloc sgt.\n");
                /* don't leak the table drm_prime_pages_to_sg() allocated */
                sg_free_table(obj->sg);
                kfree(obj->sg);
                obj->sg = NULL;
                return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&dev->struct_mutex);

        /* clone the exporter's scatterlist page by page into the
         * per-attachment table so it can be mapped for this device */
        rd = obj->sg->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        udl_attach->is_mapped = true;
        udl_attach->dir = dir;
        attach->priv = udl_attach;

err_unlock:
        mutex_unlock(&dev->struct_mutex);
        return sgt;
}
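
/*
 * Each attachment gets its own copy of the scatter-gather table so that
 * dma_map_sg() can be called per importing device.  The result is cached:
 * a second map request with the same direction returns the existing table
 * without remapping.  A request with dir == DMA_NONE returns the cloned
 * table unmapped, which mirrors the DMA_NONE check in the detach path.
 */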

static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
                              struct sg_table *sgt,
                              enum dma_data_direction dir)
{
        /* Nothing to do. */
        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);
}

static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                    unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
{
        /* TODO */
}

static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                     unsigned long page_num,
                                     void *addr)
{
        /* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
{
        /* TODO */

        return -EINVAL;
}
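
/*
 * The kmap/kunmap and mmap hooks above are unimplemented placeholders:
 * CPU access to an exported udl buffer via dma_buf_kmap() returns NULL,
 * and dma_buf_mmap() fails with -EINVAL.
 */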

static struct dma_buf_ops udl_dmabuf_ops = {
        .attach                 = udl_attach_dma_buf,
        .detach                 = udl_detach_dma_buf,
        .map_dma_buf            = udl_map_dma_buf,
        .unmap_dma_buf          = udl_unmap_dma_buf,
        .kmap                   = udl_dmabuf_kmap,
        .kmap_atomic            = udl_dmabuf_kmap_atomic,
        .kunmap                 = udl_dmabuf_kunmap,
        .kunmap_atomic          = udl_dmabuf_kunmap_atomic,
        .mmap                   = udl_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
};
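
/*
 * .release is the only hook not defined in this file:
 * drm_gem_dmabuf_release() is the generic DRM helper that drops the GEM
 * object reference held by the exported dma-buf.
 */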

struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &udl_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        return dma_buf_export(&exp_info);
}
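
/*
 * udl_gem_prime_export() serves as udl's gem_prime_export hook and is
 * normally reached from userspace via DRM_IOCTL_PRIME_HANDLE_TO_FD.
 * A sketch of the userspace side, assuming an open DRM @fd and a GEM
 * @handle:
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd now holds the exported dma-buf file descriptor
 */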

static int udl_prime_create(struct drm_device *dev,
                            size_t size,
                            struct sg_table *sg,
                            struct udl_gem_object **obj_p)
{
        struct udl_gem_object *obj;
        int npages;

        npages = size / PAGE_SIZE;

        *obj_p = NULL;
        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
        if (!obj)
                return -ENOMEM;

        obj->sg = sg;
        obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (obj->pages == NULL) {
                DRM_ERROR("obj pages is NULL %d\n", npages);
                /* drop the reference taken by udl_gem_alloc_object() */
                drm_gem_object_unreference_unlocked(&obj->base);
                return -ENOMEM;
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

        *obj_p = obj;
        return 0;
}
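
/*
 * Note that @size is assumed to be page aligned here: npages is computed
 * with a truncating division and the object is sized to npages whole
 * pages, so any partial trailing page of the imported buffer is dropped.
 */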

struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        get_device(dev->dev);
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach)) {
                put_device(dev->dev);
                return ERR_CAST(attach);
        }

        get_dma_buf(dma_buf);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;
        uobj->flags = UDL_BO_WC;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);
        put_device(dev->dev);
        return ERR_PTR(ret);
}
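
/*
 * On success the import holds a reference on the underlying device
 * (dev->dev), a reference on the dma-buf itself, and the attachment for
 * the lifetime of the GEM object; the teardown path in the GEM free
 * callback (not shown here) is expected to release them, e.g. via
 * drm_prime_gem_destroy() and put_device().  Userspace reaches this hook
 * through DRM_IOCTL_PRIME_FD_TO_HANDLE, the mirror image of the export
 * path sketched above.
 */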