kernel/drivers/gpu/drm/ttm/ttm_execbuf_util.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

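/*
 * Walk the validation list backwards, starting at the entry just before
 * @entry, and unreserve every buffer that had already been reserved when
 * a conflict was hit.
 */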
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
                                               struct ttm_validate_buffer *entry)
{
        list_for_each_entry_continue_reverse(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                __ttm_bo_unreserve(bo);
        }
}

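/*
 * Take every buffer on the validation list off the global LRU list,
 * dropping the list references it held there.  The caller must hold the
 * global lru_lock.
 */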
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                unsigned put_count = ttm_bo_del_from_lru(bo);

                ttm_bo_list_ref_sub(bo, put_count, true);
        }
}

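/*
 * Undo a successful ttm_eu_reserve_buffers() call: put every buffer back
 * on the LRU list, unreserve it, and release the acquire context.
 */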
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        spin_lock(&glob->lru_lock);
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
        spin_unlock(&glob->lru_lock);

        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr,
                           struct list_head *dups)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;

        if (list_empty(list))
                return 0;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        if (ticket)
                ww_acquire_init(ticket, &reservation_ww_class);

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
                                       ticket);
                if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        __ttm_bo_unreserve(bo);

                        ret = -EBUSY;

                } else if (ret == -EALREADY && dups) {
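                        /* This buffer is already reserved through the
                         * same ticket: move the entry over to the
                         * caller's @dups list and continue with its
                         * predecessor.
                         */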
                        struct ttm_validate_buffer *safe = entry;
                        entry = list_prev_entry(entry, head);
                        list_del(&safe->head);
                        list_add(&safe->head, dups);
                        continue;
                }

                if (!ret) {
                        if (!entry->shared)
                                continue;

                        ret = reservation_object_reserve_shared(bo->resv);
                        if (!ret)
                                continue;
                }

                /* We lost the reservation race: drop every reservation
                 * we hold, try to slow-lock only this buffer, and start
                 * over if that succeeds.
                 */
                ttm_eu_backoff_reservation_reverse(list, entry);

                if (ret == -EDEADLK && intr) {
                        ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
                                                               ticket);
                } else if (ret == -EDEADLK) {
                        ww_mutex_lock_slow(&bo->resv->lock, ticket);
                        ret = 0;
                }

                if (!ret && entry->shared)
                        ret = reservation_object_reserve_shared(bo->resv);

                if (unlikely(ret != 0)) {
                        if (ret == -EINTR)
                                ret = -ERESTARTSYS;
                        if (ticket) {
                                ww_acquire_done(ticket);
                                ww_acquire_fini(ticket);
                        }
                        return ret;
                }

                /* Move this buffer to the front of the list so the loop
                 * continues over all the other buffers, re-reserving
                 * them without any extra bookkeeping.
                 */
                list_del(&entry->head);
                list_add(&entry->head, list);
        }

        if (ticket)
                ww_acquire_done(ticket);
        spin_lock(&glob->lru_lock);
        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
        return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

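/*
 * Attach @fence to every buffer on the validation list, as a shared or
 * an exclusive fence depending on each entry, then put the buffers back
 * on the LRU list and unreserve them.
 */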
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct list_head *list, struct fence *fence)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&glob->lru_lock);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                if (entry->shared)
                        reservation_object_add_shared_fence(bo->resv, fence);
                else
                        reservation_object_add_excl_fence(bo->resv, fence);
                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
        spin_unlock(&glob->lru_lock);
        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
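
/*
 * Example usage (an illustrative sketch, not part of the original file).
 *
 * A typical command-submission path collects the buffers a job references
 * on a ttm_validate_buffer list, reserves them all atomically, validates
 * and runs the job, and attaches the job's fence before unreserving.  The
 * driver-side names below (my_job, my_driver_submit(), validate_list and
 * my_validate_and_run()) are hypothetical:
 *
 *	static int my_driver_submit(struct my_job *job, struct fence *fence)
 *	{
 *		struct ww_acquire_ctx ticket;
 *		struct list_head dups;
 *		int ret;
 *
 *		// Entries reserved twice through the same ticket land here.
 *		INIT_LIST_HEAD(&dups);
 *
 *		ret = ttm_eu_reserve_buffers(&ticket, &job->validate_list,
 *					     true, &dups);
 *		if (ret)
 *			return ret;	// e.g. -ERESTARTSYS or -EBUSY
 *
 *		ret = my_validate_and_run(job);
 *		if (ret) {
 *			// Drop all reservations without publishing a fence.
 *			ttm_eu_backoff_reservation(&ticket, &job->validate_list);
 *			return ret;
 *		}
 *
 *		// Publishes the fence on every buffer and unreserves them.
 *		ttm_eu_fence_buffer_objects(&ticket, &job->validate_list, fence);
 *		return 0;
 *	}
 */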