kvmfornfv.git / kernel/drivers/gpu/drm/qxl/qxl_object.c (raw update to the linux-4.4.6-rt14 kernel sources)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
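
/*
 * TTM destroy callback, invoked when the last reference to the
 * underlying ttm_buffer_object goes away: evict any backing qxl
 * surface, unlink the BO from the device's GEM object list, then
 * release the GEM base object and free the wrapper.
 */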
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct qxl_bo *bo;
        struct qxl_device *qdev;

        bo = container_of(tbo, struct qxl_bo, tbo);
        qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

        qxl_surface_evict(qdev, bo, false);
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &qxl_ttm_bo_destroy;
}

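/*
 * Build the TTM placement list for a qxl GEM domain.  Exactly one
 * domain flag is expected; an unrecognised domain falls back to
 * cacheable system memory.  Pinned BOs get TTM_PL_FLAG_NO_EVICT so
 * TTM will not migrate them.  fpfn/lpfn stay 0, meaning "anywhere in
 * the region".
 */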
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
        u32 c = 0;
        u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
        unsigned i;

        qbo->placement.placement = qbo->placements;
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM)
                qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
        if (domain == QXL_GEM_DOMAIN_SURFACE)
                qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
        if (domain == QXL_GEM_DOMAIN_CPU)
                qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
        if (!c)
                qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        qbo->placement.num_placement = c;
        qbo->placement.num_busy_placement = c;
        for (i = 0; i < c; ++i) {
                qbo->placements[i].fpfn = 0;
                qbo->placements[i].lpfn = 0;
        }
}

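/*
 * Allocate and initialise a qxl BO.  The size is rounded up to whole
 * pages, the GEM base object is set up first, and ttm_bo_init() is
 * then handed qxl_ttm_bo_destroy as the destroy callback; on the
 * ttm_bo_init() failure path TTM invokes that callback itself, which
 * is why there is no explicit kfree() there.
 *
 * A minimal caller might look like this (hypothetical sketch, not
 * taken from this file):
 *
 *	struct qxl_bo *bo;
 *	int ret = qxl_bo_create(qdev, PAGE_SIZE, true, false,
 *				QXL_GEM_DOMAIN_VRAM, NULL, &bo);
 *	if (ret)
 *		return ret;
 */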
int qxl_bo_create(struct qxl_device *qdev,
                  unsigned long size, bool kernel, bool pinned, u32 domain,
                  struct qxl_surface *surf,
                  struct qxl_bo **bo_ptr)
{
        struct qxl_bo *bo;
        enum ttm_bo_type type;
        int r;

        if (kernel)
                type = ttm_bo_type_kernel;
        else
                type = ttm_bo_type_device;
        *bo_ptr = NULL;
        bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        size = roundup(size, PAGE_SIZE);
        r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->type = domain;
        bo->pin_count = pinned ? 1 : 0;
        bo->surface_id = 0;
        INIT_LIST_HEAD(&bo->list);

        if (surf)
                bo->surf = *surf;

        qxl_ttm_placement_from_domain(bo, domain, pinned);

        r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, 0, !kernel, NULL, size,
                        NULL, NULL, &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(qdev->dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                return r;
        }
        *bo_ptr = bo;
        return 0;
}

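/*
 * Map the whole BO into the kernel address space.  The mapping is
 * cached in bo->kptr, so repeated calls are cheap until
 * qxl_bo_kunmap() drops it.  Note that is_iomem is computed by
 * ttm_kmap_obj_virtual() but not reported back to the caller.
 */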
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr)
                        *ptr = bo->kptr;
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr)
                *ptr = bo->kptr;
        return 0;
}

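/*
 * Map a single page of a VRAM or surface BO through the device's
 * io_mapping using io_mapping_map_atomic_wc().  The caller must not
 * sleep before the matching qxl_bo_kunmap_atomic_page().  BOs living
 * in any other memory type fall back to the (non-atomic) whole-object
 * kmap above.
 */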
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                              struct qxl_bo *bo, int page_offset)
{
        struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        void *rptr;
        int ret;
        struct io_mapping *map;

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
                map = qdev->surface_mapping;
        else
                goto fallback;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
        ttm_mem_io_unlock(man);

        return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
        if (bo->kptr) {
                rptr = bo->kptr + (page_offset * PAGE_SIZE);
                return rptr;
        }

        ret = qxl_bo_kmap(bo, &rptr);
        if (ret)
                return NULL;

        rptr += page_offset * PAGE_SIZE;
        return rptr;
}

void qxl_bo_kunmap(struct qxl_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        ttm_bo_kunmap(&bo->kmap);
}

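/*
 * Counterpart of qxl_bo_kmap_atomic_page(): drop the atomic mapping
 * and release the io-space reservation taken at map time; the
 * fallback path mirrors the one in the map function.
 */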
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, void *pmap)
{
        struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        struct io_mapping *map;

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
                map = qdev->surface_mapping;
        else
                goto fallback;

        io_mapping_unmap_atomic(pmap);

        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
        ttm_mem_io_unlock(man);
        return;
fallback:
        qxl_bo_kunmap(bo);
}

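/*
 * Reference helpers: thin wrappers around the GEM reference count, so
 * callers can stay in terms of qxl_bo.  qxl_bo_unref() also clears
 * the caller's pointer; the final unreference ends up in
 * qxl_ttm_bo_destroy() via TTM.
 */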
void qxl_bo_unref(struct qxl_bo **bo)
{
        if ((*bo) == NULL)
                return;

        drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
        *bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
        drm_gem_object_reference(&bo->gem_base);
        return bo;
}

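/*
 * Pin the BO in the given domain.  Pinning is reference counted: only
 * the first pin re-validates the BO with TTM_PL_FLAG_NO_EVICT set.
 *
 * Hypothetical pin/unpin pairing (sketch, not from this file):
 *
 *	u64 gpu_addr;
 *
 *	if (qxl_bo_pin(bo, QXL_GEM_DOMAIN_VRAM, &gpu_addr) == 0) {
 *		... use gpu_addr while the BO cannot move ...
 *		qxl_bo_unpin(bo);
 *	}
 */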
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
        int r;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = qxl_bo_gpu_offset(bo);
                return 0;
        }
        qxl_ttm_placement_from_domain(bo, domain, true);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = qxl_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(qdev->dev, "%p pin failed\n", bo);
        return r;
}

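/*
 * Drop one pin reference; once the count reaches zero, clear
 * TTM_PL_FLAG_NO_EVICT from every placement and re-validate so TTM
 * may move or evict the BO again.
 */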
int qxl_bo_unpin(struct qxl_bo *bo)
{
        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
        int r, i;

        if (!bo->pin_count) {
                dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

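/*
 * Last-resort cleanup, expected to run at device teardown: loudly
 * report and forcibly unreference any GEM objects userspace failed
 * to release.
 */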
void qxl_bo_force_delete(struct qxl_device *qdev)
{
        struct qxl_bo *bo, *n;

        if (list_empty(&qdev->gem.objects))
                return;
        dev_err(qdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
                dev_err(qdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&qdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&qdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference_unlocked(&bo->gem_base);
        }
}

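/* Memory-manager bring-up/teardown simply delegates to the TTM layer. */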
int qxl_bo_init(struct qxl_device *qdev)
{
        return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
        qxl_ttm_fini(qdev);
}

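/*
 * Lazily assign a surface id: the first time a surface-domain BO is
 * used, allocate an id and create the hardware surface behind it.
 */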
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
        int ret;

        if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
                /* allocate a surface id for this surface now */
                ret = qxl_surface_id_alloc(qdev, bo);
                if (ret)
                        return ret;

                ret = qxl_hw_surface_alloc(qdev, bo, NULL);
                if (ret)
                        return ret;
        }
        return 0;
}

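/*
 * Evict everything from the surface (TTM_PL_PRIV0) or VRAM memory
 * manager; thin wrappers around ttm_bo_evict_mm(), used e.g. when the
 * device is suspended.
 */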
int qxl_surf_evict(struct qxl_device *qdev)
{
        return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
        return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}