kvmfornfv.git: kernel/drivers/gpu/drm/i915/i915_gem.c (kernel bump from 4.1.3-rt to 4.1.7-rt)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_vgpu.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35 #include <linux/shmem_fs.h>
36 #include <linux/slab.h>
37 #include <linux/swap.h>
38 #include <linux/pci.h>
39 #include <linux/dma-buf.h>
40
41 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
42 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
43 static __must_check int
44 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45                                bool readonly);
46 static void
47 i915_gem_object_retire(struct drm_i915_gem_object *obj);
48
49 static void i915_gem_write_fence(struct drm_device *dev, int reg,
50                                  struct drm_i915_gem_object *obj);
51 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
52                                          struct drm_i915_fence_reg *fence,
53                                          bool enable);
54
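/* An object is coherent with the CPU cache when the platform has an LLC or
 * the object is not mapped uncached (I915_CACHE_NONE); in that case reads
 * and writes do not need a manual clflush.
 */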
55 static bool cpu_cache_is_coherent(struct drm_device *dev,
56                                   enum i915_cache_level level)
57 {
58         return HAS_LLC(dev) || level != I915_CACHE_NONE;
59 }
60
61 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
62 {
63         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
64                 return true;
65
66         return obj->pin_display;
67 }
68
69 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
70 {
71         if (obj->tiling_mode)
72                 i915_gem_release_mmap(obj);
73
74         /* As we do not have an associated fence register, we will force
75          * a tiling change if we ever need to acquire one.
76          */
77         obj->fence_dirty = false;
78         obj->fence_reg = I915_FENCE_REG_NONE;
79 }
80
81 /* some bookkeeping */
82 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
83                                   size_t size)
84 {
85         spin_lock(&dev_priv->mm.object_stat_lock);
86         dev_priv->mm.object_count++;
87         dev_priv->mm.object_memory += size;
88         spin_unlock(&dev_priv->mm.object_stat_lock);
89 }
90
91 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
92                                      size_t size)
93 {
94         spin_lock(&dev_priv->mm.object_stat_lock);
95         dev_priv->mm.object_count--;
96         dev_priv->mm.object_memory -= size;
97         spin_unlock(&dev_priv->mm.object_stat_lock);
98 }
99
100 static int
101 i915_gem_wait_for_error(struct i915_gpu_error *error)
102 {
103         int ret;
104
105 #define EXIT_COND (!i915_reset_in_progress(error) || \
106                    i915_terminally_wedged(error))
107         if (EXIT_COND)
108                 return 0;
109
110         /*
111          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
112          * userspace. If it takes that long something really bad is going on and
113          * we should simply try to bail out and fail as gracefully as possible.
114          */
115         ret = wait_event_interruptible_timeout(error->reset_queue,
116                                                EXIT_COND,
117                                                10*HZ);
118         if (ret == 0) {
119                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
120                 return -EIO;
121         } else if (ret < 0) {
122                 return ret;
123         }
124 #undef EXIT_COND
125
126         return 0;
127 }
128
129 int i915_mutex_lock_interruptible(struct drm_device *dev)
130 {
131         struct drm_i915_private *dev_priv = dev->dev_private;
132         int ret;
133
134         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
135         if (ret)
136                 return ret;
137
138         ret = mutex_lock_interruptible(&dev->struct_mutex);
139         if (ret)
140                 return ret;
141
142         WARN_ON(i915_verify_lists(dev));
143         return 0;
144 }
145
146 int
147 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
148                             struct drm_file *file)
149 {
150         struct drm_i915_private *dev_priv = dev->dev_private;
151         struct drm_i915_gem_get_aperture *args = data;
152         struct drm_i915_gem_object *obj;
153         size_t pinned;
154
155         pinned = 0;
156         mutex_lock(&dev->struct_mutex);
157         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
158                 if (i915_gem_obj_is_pinned(obj))
159                         pinned += i915_gem_obj_ggtt_size(obj);
160         mutex_unlock(&dev->struct_mutex);
161
162         args->aper_size = dev_priv->gtt.base.total;
163         args->aper_available_size = args->aper_size - pinned;
164
165         return 0;
166 }
167
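/* Copy every shmem backing page of the object into the contiguous phys
 * handle and publish a single-entry sg_table describing that DMA region
 * as obj->pages.
 */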
168 static int
169 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
170 {
171         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
172         char *vaddr = obj->phys_handle->vaddr;
173         struct sg_table *st;
174         struct scatterlist *sg;
175         int i;
176
177         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
178                 return -EINVAL;
179
180         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
181                 struct page *page;
182                 char *src;
183
184                 page = shmem_read_mapping_page(mapping, i);
185                 if (IS_ERR(page))
186                         return PTR_ERR(page);
187
188                 src = kmap_atomic(page);
189                 memcpy(vaddr, src, PAGE_SIZE);
190                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
191                 kunmap_atomic(src);
192
193                 page_cache_release(page);
194                 vaddr += PAGE_SIZE;
195         }
196
197         i915_gem_chipset_flush(obj->base.dev);
198
199         st = kmalloc(sizeof(*st), GFP_KERNEL);
200         if (st == NULL)
201                 return -ENOMEM;
202
203         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
204                 kfree(st);
205                 return -ENOMEM;
206         }
207
208         sg = st->sgl;
209         sg->offset = 0;
210         sg->length = obj->base.size;
211
212         sg_dma_address(sg) = obj->phys_handle->busaddr;
213         sg_dma_len(sg) = obj->base.size;
214
215         obj->pages = st;
216         obj->has_dma_mapping = true;
217         return 0;
218 }
219
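/* Write the CPU copy held in the phys handle back to the shmem pages
 * (skipped if userspace marked the object DONTNEED) and free the sg_table
 * built by the phys get_pages path.
 */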
220 static void
221 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
222 {
223         int ret;
224
225         BUG_ON(obj->madv == __I915_MADV_PURGED);
226
227         ret = i915_gem_object_set_to_cpu_domain(obj, true);
228         if (ret) {
229                 /* In the event of a disaster, abandon all caches and
230                  * hope for the best.
231                  */
232                 WARN_ON(ret != -EIO);
233                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
234         }
235
236         if (obj->madv == I915_MADV_DONTNEED)
237                 obj->dirty = 0;
238
239         if (obj->dirty) {
240                 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
241                 char *vaddr = obj->phys_handle->vaddr;
242                 int i;
243
244                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
245                         struct page *page;
246                         char *dst;
247
248                         page = shmem_read_mapping_page(mapping, i);
249                         if (IS_ERR(page))
250                                 continue;
251
252                         dst = kmap_atomic(page);
253                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
254                         memcpy(dst, vaddr, PAGE_SIZE);
255                         kunmap_atomic(dst);
256
257                         set_page_dirty(page);
258                         if (obj->madv == I915_MADV_WILLNEED)
259                                 mark_page_accessed(page);
260                         page_cache_release(page);
261                         vaddr += PAGE_SIZE;
262                 }
263                 obj->dirty = 0;
264         }
265
266         sg_free_table(obj->pages);
267         kfree(obj->pages);
268
269         obj->has_dma_mapping = false;
270 }
271
272 static void
273 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
274 {
275         drm_pci_free(obj->base.dev, obj->phys_handle);
276 }
277
278 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
279         .get_pages = i915_gem_object_get_pages_phys,
280         .put_pages = i915_gem_object_put_pages_phys,
281         .release = i915_gem_object_release_phys,
282 };
283
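/* Unbind all VMAs and release the object's backing pages, holding a
 * temporary reference so the object cannot vanish while we do so.
 */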
284 static int
285 drop_pages(struct drm_i915_gem_object *obj)
286 {
287         struct i915_vma *vma, *next;
288         int ret;
289
290         drm_gem_object_reference(&obj->base);
291         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
292                 if (i915_vma_unbind(vma))
293                         break;
294
295         ret = i915_gem_object_put_pages(obj);
296         drm_gem_object_unreference(&obj->base);
297
298         return ret;
299 }
300
301 int
302 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
303                             int align)
304 {
305         drm_dma_handle_t *phys;
306         int ret;
307
308         if (obj->phys_handle) {
309                 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
310                         return -EBUSY;
311
312                 return 0;
313         }
314
315         if (obj->madv != I915_MADV_WILLNEED)
316                 return -EFAULT;
317
318         if (obj->base.filp == NULL)
319                 return -EINVAL;
320
321         ret = drop_pages(obj);
322         if (ret)
323                 return ret;
324
325         /* create a new object */
326         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
327         if (!phys)
328                 return -ENOMEM;
329
330         obj->phys_handle = phys;
331         obj->ops = &i915_gem_phys_ops;
332
333         return i915_gem_object_get_pages(obj);
334 }
335
336 static int
337 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
338                      struct drm_i915_gem_pwrite *args,
339                      struct drm_file *file_priv)
340 {
341         struct drm_device *dev = obj->base.dev;
342         void *vaddr = obj->phys_handle->vaddr + args->offset;
343         char __user *user_data = to_user_ptr(args->data_ptr);
344         int ret = 0;
345
346         /* We manually control the domain here and pretend that it
347          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
348          */
349         ret = i915_gem_object_wait_rendering(obj, false);
350         if (ret)
351                 return ret;
352
353         intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
354         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
355                 unsigned long unwritten;
356
357                 /* The physical object once assigned is fixed for the lifetime
358                  * of the obj, so we can safely drop the lock and continue
359                  * to access vaddr.
360                  */
361                 mutex_unlock(&dev->struct_mutex);
362                 unwritten = copy_from_user(vaddr, user_data, args->size);
363                 mutex_lock(&dev->struct_mutex);
364                 if (unwritten) {
365                         ret = -EFAULT;
366                         goto out;
367                 }
368         }
369
370         drm_clflush_virt_range(vaddr, args->size);
371         i915_gem_chipset_flush(dev);
372
373 out:
374         intel_fb_obj_flush(obj, false);
375         return ret;
376 }
377
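/* GEM object structs are allocated from a per-device slab cache. */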
378 void *i915_gem_object_alloc(struct drm_device *dev)
379 {
380         struct drm_i915_private *dev_priv = dev->dev_private;
381         return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
382 }
383
384 void i915_gem_object_free(struct drm_i915_gem_object *obj)
385 {
386         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
387         kmem_cache_free(dev_priv->slab, obj);
388 }
389
390 static int
391 i915_gem_create(struct drm_file *file,
392                 struct drm_device *dev,
393                 uint64_t size,
394                 uint32_t *handle_p)
395 {
396         struct drm_i915_gem_object *obj;
397         int ret;
398         u32 handle;
399
400         size = roundup(size, PAGE_SIZE);
401         if (size == 0)
402                 return -EINVAL;
403
404         /* Allocate the new object */
405         obj = i915_gem_alloc_object(dev, size);
406         if (obj == NULL)
407                 return -ENOMEM;
408
409         ret = drm_gem_handle_create(file, &obj->base, &handle);
410         /* drop reference from allocate - handle holds it now */
411         drm_gem_object_unreference_unlocked(&obj->base);
412         if (ret)
413                 return ret;
414
415         *handle_p = handle;
416         return 0;
417 }
418
419 int
420 i915_gem_dumb_create(struct drm_file *file,
421                      struct drm_device *dev,
422                      struct drm_mode_create_dumb *args)
423 {
424         /* have to work out size/pitch and return them */
425         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
426         args->size = args->pitch * args->height;
427         return i915_gem_create(file, dev,
428                                args->size, &args->handle);
429 }
430
431 /**
432  * Creates a new mm object and returns a handle to it.
433  */
434 int
435 i915_gem_create_ioctl(struct drm_device *dev, void *data,
436                       struct drm_file *file)
437 {
438         struct drm_i915_gem_create *args = data;
439
440         return i915_gem_create(file, dev,
441                                args->size, &args->handle);
442 }
443
444 static inline int
445 __copy_to_user_swizzled(char __user *cpu_vaddr,
446                         const char *gpu_vaddr, int gpu_offset,
447                         int length)
448 {
449         int ret, cpu_offset = 0;
450
451         while (length > 0) {
452                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
453                 int this_length = min(cacheline_end - gpu_offset, length);
454                 int swizzled_gpu_offset = gpu_offset ^ 64;
455
456                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
457                                      gpu_vaddr + swizzled_gpu_offset,
458                                      this_length);
459                 if (ret)
460                         return ret + length;
461
462                 cpu_offset += this_length;
463                 gpu_offset += this_length;
464                 length -= this_length;
465         }
466
467         return 0;
468 }
469
470 static inline int
471 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
472                           const char __user *cpu_vaddr,
473                           int length)
474 {
475         int ret, cpu_offset = 0;
476
477         while (length > 0) {
478                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
479                 int this_length = min(cacheline_end - gpu_offset, length);
480                 int swizzled_gpu_offset = gpu_offset ^ 64;
481
482                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
483                                        cpu_vaddr + cpu_offset,
484                                        this_length);
485                 if (ret)
486                         return ret + length;
487
488                 cpu_offset += this_length;
489                 gpu_offset += this_length;
490                 length -= this_length;
491         }
492
493         return 0;
494 }
495
496 /*
497  * Pins the specified object's pages and synchronizes the object with
498  * GPU accesses. Sets needs_clflush to non-zero if the caller should
499  * flush the object from the CPU cache.
500  */
501 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
502                                     int *needs_clflush)
503 {
504         int ret;
505
506         *needs_clflush = 0;
507
508         if (!obj->base.filp)
509                 return -EINVAL;
510
511         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
512                 /* If we're not in the cpu read domain, set ourself into the gtt
513                  * read domain and manually flush cachelines (if required). This
514                  * optimizes for the case when the gpu will dirty the data
515                  * anyway again before the next pread happens. */
516                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
517                                                         obj->cache_level);
518                 ret = i915_gem_object_wait_rendering(obj, true);
519                 if (ret)
520                         return ret;
521
522                 i915_gem_object_retire(obj);
523         }
524
525         ret = i915_gem_object_get_pages(obj);
526         if (ret)
527                 return ret;
528
529         i915_gem_object_pin_pages(obj);
530
531         return ret;
532 }
533
534 /* Per-page copy function for the shmem pread fastpath.
535  * Flushes invalid cachelines before reading the target if
536  * needs_clflush is set. */
537 static int
538 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
539                  char __user *user_data,
540                  bool page_do_bit17_swizzling, bool needs_clflush)
541 {
542         char *vaddr;
543         int ret;
544
545         if (unlikely(page_do_bit17_swizzling))
546                 return -EINVAL;
547
548         vaddr = kmap_atomic(page);
549         if (needs_clflush)
550                 drm_clflush_virt_range(vaddr + shmem_page_offset,
551                                        page_length);
552         ret = __copy_to_user_inatomic(user_data,
553                                       vaddr + shmem_page_offset,
554                                       page_length);
555         kunmap_atomic(vaddr);
556
557         return ret ? -EFAULT : 0;
558 }
559
560 static void
561 shmem_clflush_swizzled_range(char *addr, unsigned long length,
562                              bool swizzled)
563 {
564         if (unlikely(swizzled)) {
565                 unsigned long start = (unsigned long) addr;
566                 unsigned long end = (unsigned long) addr + length;
567
568                 /* For swizzling simply ensure that we always flush both
569                  * channels. Lame, but simple and it works. Swizzled
570                  * pwrite/pread is far from a hotpath - current userspace
571                  * doesn't use it at all. */
572                 start = round_down(start, 128);
573                 end = round_up(end, 128);
574
575                 drm_clflush_virt_range((void *)start, end - start);
576         } else {
577                 drm_clflush_virt_range(addr, length);
578         }
579
580 }
581
582 /* Only difference to the fast-path function is that this can handle bit17
583  * and uses non-atomic copy and kmap functions. */
584 static int
585 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
586                  char __user *user_data,
587                  bool page_do_bit17_swizzling, bool needs_clflush)
588 {
589         char *vaddr;
590         int ret;
591
592         vaddr = kmap(page);
593         if (needs_clflush)
594                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
595                                              page_length,
596                                              page_do_bit17_swizzling);
597
598         if (page_do_bit17_swizzling)
599                 ret = __copy_to_user_swizzled(user_data,
600                                               vaddr, shmem_page_offset,
601                                               page_length);
602         else
603                 ret = __copy_to_user(user_data,
604                                      vaddr + shmem_page_offset,
605                                      page_length);
606         kunmap(page);
607
608         return ret ? -EFAULT : 0;
609 }
610
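/* Copy object contents out to userspace through the shmem pages. The atomic
 * fast path is tried first for each page; if it faults (or the page needs
 * bit17 swizzling) we drop struct_mutex, prefault the user buffer and fall
 * back to the sleeping slow path.
 */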
611 static int
612 i915_gem_shmem_pread(struct drm_device *dev,
613                      struct drm_i915_gem_object *obj,
614                      struct drm_i915_gem_pread *args,
615                      struct drm_file *file)
616 {
617         char __user *user_data;
618         ssize_t remain;
619         loff_t offset;
620         int shmem_page_offset, page_length, ret = 0;
621         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
622         int prefaulted = 0;
623         int needs_clflush = 0;
624         struct sg_page_iter sg_iter;
625
626         user_data = to_user_ptr(args->data_ptr);
627         remain = args->size;
628
629         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
630
631         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
632         if (ret)
633                 return ret;
634
635         offset = args->offset;
636
637         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
638                          offset >> PAGE_SHIFT) {
639                 struct page *page = sg_page_iter_page(&sg_iter);
640
641                 if (remain <= 0)
642                         break;
643
644                 /* Operation in this page
645                  *
646                  * shmem_page_offset = offset within page in shmem file
647                  * page_length = bytes to copy for this page
648                  */
649                 shmem_page_offset = offset_in_page(offset);
650                 page_length = remain;
651                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
652                         page_length = PAGE_SIZE - shmem_page_offset;
653
654                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
655                         (page_to_phys(page) & (1 << 17)) != 0;
656
657                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
658                                        user_data, page_do_bit17_swizzling,
659                                        needs_clflush);
660                 if (ret == 0)
661                         goto next_page;
662
663                 mutex_unlock(&dev->struct_mutex);
664
665                 if (likely(!i915.prefault_disable) && !prefaulted) {
666                         ret = fault_in_multipages_writeable(user_data, remain);
667                         /* Userspace is tricking us, but we've already clobbered
668                          * its pages with the prefault and promised to write the
669                          * data up to the first fault. Hence ignore any errors
670                          * and just continue. */
671                         (void)ret;
672                         prefaulted = 1;
673                 }
674
675                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
676                                        user_data, page_do_bit17_swizzling,
677                                        needs_clflush);
678
679                 mutex_lock(&dev->struct_mutex);
680
681                 if (ret)
682                         goto out;
683
684 next_page:
685                 remain -= page_length;
686                 user_data += page_length;
687                 offset += page_length;
688         }
689
690 out:
691         i915_gem_object_unpin_pages(obj);
692
693         return ret;
694 }
695
696 /**
697  * Reads data from the object referenced by handle.
698  *
699  * On error, the contents of *data are undefined.
700  */
701 int
702 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
703                      struct drm_file *file)
704 {
705         struct drm_i915_gem_pread *args = data;
706         struct drm_i915_gem_object *obj;
707         int ret = 0;
708
709         if (args->size == 0)
710                 return 0;
711
712         if (!access_ok(VERIFY_WRITE,
713                        to_user_ptr(args->data_ptr),
714                        args->size))
715                 return -EFAULT;
716
717         ret = i915_mutex_lock_interruptible(dev);
718         if (ret)
719                 return ret;
720
721         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
722         if (&obj->base == NULL) {
723                 ret = -ENOENT;
724                 goto unlock;
725         }
726
727         /* Bounds check source.  */
728         if (args->offset > obj->base.size ||
729             args->size > obj->base.size - args->offset) {
730                 ret = -EINVAL;
731                 goto out;
732         }
733
734         /* prime objects have no backing filp to GEM pread/pwrite
735          * pages from.
736          */
737         if (!obj->base.filp) {
738                 ret = -EINVAL;
739                 goto out;
740         }
741
742         trace_i915_gem_object_pread(obj, args->offset, args->size);
743
744         ret = i915_gem_shmem_pread(dev, obj, args, file);
745
746 out:
747         drm_gem_object_unreference(&obj->base);
748 unlock:
749         mutex_unlock(&dev->struct_mutex);
750         return ret;
751 }
752
753 /* This is the fast write path which cannot handle
754  * page faults in the source data
755  */
756
757 static inline int
758 fast_user_write(struct io_mapping *mapping,
759                 loff_t page_base, int page_offset,
760                 char __user *user_data,
761                 int length)
762 {
763         void __iomem *vaddr_atomic;
764         void *vaddr;
765         unsigned long unwritten;
766
767         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
768         /* We can use the cpu mem copy function because this is X86. */
769         vaddr = (void __force*)vaddr_atomic + page_offset;
770         unwritten = __copy_from_user_inatomic_nocache(vaddr,
771                                                       user_data, length);
772         io_mapping_unmap_atomic(vaddr_atomic);
773         return unwritten;
774 }
775
776 /**
777  * This is the fast pwrite path, where we copy the data directly from the
778  * user into the GTT, uncached.
779  */
780 static int
781 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
782                          struct drm_i915_gem_object *obj,
783                          struct drm_i915_gem_pwrite *args,
784                          struct drm_file *file)
785 {
786         struct drm_i915_private *dev_priv = dev->dev_private;
787         ssize_t remain;
788         loff_t offset, page_base;
789         char __user *user_data;
790         int page_offset, page_length, ret;
791
792         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
793         if (ret)
794                 goto out;
795
796         ret = i915_gem_object_set_to_gtt_domain(obj, true);
797         if (ret)
798                 goto out_unpin;
799
800         ret = i915_gem_object_put_fence(obj);
801         if (ret)
802                 goto out_unpin;
803
804         user_data = to_user_ptr(args->data_ptr);
805         remain = args->size;
806
807         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
808
809         intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
810
811         while (remain > 0) {
812                 /* Operation in this page
813                  *
814                  * page_base = page offset within aperture
815                  * page_offset = offset within page
816                  * page_length = bytes to copy for this page
817                  */
818                 page_base = offset & PAGE_MASK;
819                 page_offset = offset_in_page(offset);
820                 page_length = remain;
821                 if ((page_offset + remain) > PAGE_SIZE)
822                         page_length = PAGE_SIZE - page_offset;
823
824                 /* If we get a fault while copying data, then (presumably) our
825                  * source page isn't available.  Return the error and we'll
826                  * retry in the slow path.
827                  */
828                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
829                                     page_offset, user_data, page_length)) {
830                         ret = -EFAULT;
831                         goto out_flush;
832                 }
833
834                 remain -= page_length;
835                 user_data += page_length;
836                 offset += page_length;
837         }
838
839 out_flush:
840         intel_fb_obj_flush(obj, false);
841 out_unpin:
842         i915_gem_object_ggtt_unpin(obj);
843 out:
844         return ret;
845 }
846
847 /* Per-page copy function for the shmem pwrite fastpath.
848  * Flushes invalid cachelines before writing to the target if
849  * needs_clflush_before is set and flushes out any written cachelines after
850  * writing if needs_clflush is set. */
851 static int
852 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
853                   char __user *user_data,
854                   bool page_do_bit17_swizzling,
855                   bool needs_clflush_before,
856                   bool needs_clflush_after)
857 {
858         char *vaddr;
859         int ret;
860
861         if (unlikely(page_do_bit17_swizzling))
862                 return -EINVAL;
863
864         vaddr = kmap_atomic(page);
865         if (needs_clflush_before)
866                 drm_clflush_virt_range(vaddr + shmem_page_offset,
867                                        page_length);
868         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
869                                         user_data, page_length);
870         if (needs_clflush_after)
871                 drm_clflush_virt_range(vaddr + shmem_page_offset,
872                                        page_length);
873         kunmap_atomic(vaddr);
874
875         return ret ? -EFAULT : 0;
876 }
877
878 /* Only difference to the fast-path function is that this can handle bit17
879  * and uses non-atomic copy and kmap functions. */
880 static int
881 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
882                   char __user *user_data,
883                   bool page_do_bit17_swizzling,
884                   bool needs_clflush_before,
885                   bool needs_clflush_after)
886 {
887         char *vaddr;
888         int ret;
889
890         vaddr = kmap(page);
891         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
892                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
893                                              page_length,
894                                              page_do_bit17_swizzling);
895         if (page_do_bit17_swizzling)
896                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
897                                                 user_data,
898                                                 page_length);
899         else
900                 ret = __copy_from_user(vaddr + shmem_page_offset,
901                                        user_data,
902                                        page_length);
903         if (needs_clflush_after)
904                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
905                                              page_length,
906                                              page_do_bit17_swizzling);
907         kunmap(page);
908
909         return ret ? -EFAULT : 0;
910 }
911
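/* Write userspace data into the object's shmem pages, mirroring the pread
 * path: per-page atomic fast copy first, then a slow path that drops
 * struct_mutex, with clflushes before/after as the cache domains require.
 */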
912 static int
913 i915_gem_shmem_pwrite(struct drm_device *dev,
914                       struct drm_i915_gem_object *obj,
915                       struct drm_i915_gem_pwrite *args,
916                       struct drm_file *file)
917 {
918         ssize_t remain;
919         loff_t offset;
920         char __user *user_data;
921         int shmem_page_offset, page_length, ret = 0;
922         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
923         int hit_slowpath = 0;
924         int needs_clflush_after = 0;
925         int needs_clflush_before = 0;
926         struct sg_page_iter sg_iter;
927
928         user_data = to_user_ptr(args->data_ptr);
929         remain = args->size;
930
931         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
932
933         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
934                 /* If we're not in the cpu write domain, set ourself into the gtt
935                  * write domain and manually flush cachelines (if required). This
936                  * optimizes for the case when the gpu will use the data
937                  * right away and we therefore have to clflush anyway. */
938                 needs_clflush_after = cpu_write_needs_clflush(obj);
939                 ret = i915_gem_object_wait_rendering(obj, false);
940                 if (ret)
941                         return ret;
942
943                 i915_gem_object_retire(obj);
944         }
945         /* Same trick applies to invalidate partially written cachelines read
946          * before writing. */
947         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
948                 needs_clflush_before =
949                         !cpu_cache_is_coherent(dev, obj->cache_level);
950
951         ret = i915_gem_object_get_pages(obj);
952         if (ret)
953                 return ret;
954
955         intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
956
957         i915_gem_object_pin_pages(obj);
958
959         offset = args->offset;
960         obj->dirty = 1;
961
962         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
963                          offset >> PAGE_SHIFT) {
964                 struct page *page = sg_page_iter_page(&sg_iter);
965                 int partial_cacheline_write;
966
967                 if (remain <= 0)
968                         break;
969
970                 /* Operation in this page
971                  *
972                  * shmem_page_offset = offset within page in shmem file
973                  * page_length = bytes to copy for this page
974                  */
975                 shmem_page_offset = offset_in_page(offset);
976
977                 page_length = remain;
978                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
979                         page_length = PAGE_SIZE - shmem_page_offset;
980
981                 /* If we don't overwrite a cacheline completely we need to be
982                  * careful to have up-to-date data by first clflushing. Don't
983                  * overcomplicate things and flush the entire patch. */
984                 partial_cacheline_write = needs_clflush_before &&
985                         ((shmem_page_offset | page_length)
986                                 & (boot_cpu_data.x86_clflush_size - 1));
987
988                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
989                         (page_to_phys(page) & (1 << 17)) != 0;
990
991                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
992                                         user_data, page_do_bit17_swizzling,
993                                         partial_cacheline_write,
994                                         needs_clflush_after);
995                 if (ret == 0)
996                         goto next_page;
997
998                 hit_slowpath = 1;
999                 mutex_unlock(&dev->struct_mutex);
1000                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1001                                         user_data, page_do_bit17_swizzling,
1002                                         partial_cacheline_write,
1003                                         needs_clflush_after);
1004
1005                 mutex_lock(&dev->struct_mutex);
1006
1007                 if (ret)
1008                         goto out;
1009
1010 next_page:
1011                 remain -= page_length;
1012                 user_data += page_length;
1013                 offset += page_length;
1014         }
1015
1016 out:
1017         i915_gem_object_unpin_pages(obj);
1018
1019         if (hit_slowpath) {
1020                 /*
1021                  * Fixup: Flush cpu caches in case we didn't flush the dirty
1022                  * cachelines in-line while writing and the object moved
1023                  * out of the cpu write domain while we've dropped the lock.
1024                  */
1025                 if (!needs_clflush_after &&
1026                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1027                         if (i915_gem_clflush_object(obj, obj->pin_display))
1028                                 i915_gem_chipset_flush(dev);
1029                 }
1030         }
1031
1032         if (needs_clflush_after)
1033                 i915_gem_chipset_flush(dev);
1034
1035         intel_fb_obj_flush(obj, false);
1036         return ret;
1037 }
1038
1039 /**
1040  * Writes data to the object referenced by handle.
1041  *
1042  * On error, the contents of the buffer that were to be modified are undefined.
1043  */
1044 int
1045 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1046                       struct drm_file *file)
1047 {
1048         struct drm_i915_private *dev_priv = dev->dev_private;
1049         struct drm_i915_gem_pwrite *args = data;
1050         struct drm_i915_gem_object *obj;
1051         int ret;
1052
1053         if (args->size == 0)
1054                 return 0;
1055
1056         if (!access_ok(VERIFY_READ,
1057                        to_user_ptr(args->data_ptr),
1058                        args->size))
1059                 return -EFAULT;
1060
1061         if (likely(!i915.prefault_disable)) {
1062                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
1063                                                    args->size);
1064                 if (ret)
1065                         return -EFAULT;
1066         }
1067
1068         intel_runtime_pm_get(dev_priv);
1069
1070         ret = i915_mutex_lock_interruptible(dev);
1071         if (ret)
1072                 goto put_rpm;
1073
1074         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1075         if (&obj->base == NULL) {
1076                 ret = -ENOENT;
1077                 goto unlock;
1078         }
1079
1080         /* Bounds check destination. */
1081         if (args->offset > obj->base.size ||
1082             args->size > obj->base.size - args->offset) {
1083                 ret = -EINVAL;
1084                 goto out;
1085         }
1086
1087         /* prime objects have no backing filp to GEM pread/pwrite
1088          * pages from.
1089          */
1090         if (!obj->base.filp) {
1091                 ret = -EINVAL;
1092                 goto out;
1093         }
1094
1095         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1096
1097         ret = -EFAULT;
1098         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1099          * it would end up going through the fenced access, and we'll get
1100          * different detiling behavior between reading and writing.
1101          * pread/pwrite currently are reading and writing from the CPU
1102          * perspective, requiring manual detiling by the client.
1103          */
1104         if (obj->tiling_mode == I915_TILING_NONE &&
1105             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1106             cpu_write_needs_clflush(obj)) {
1107                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1108                 /* Note that the gtt paths might fail with non-page-backed user
1109                  * pointers (e.g. gtt mappings when moving data between
1110                  * textures). Fallback to the shmem path in that case. */
1111                  * textures). Fall back to the shmem path in that case. */
1112
1113         if (ret == -EFAULT || ret == -ENOSPC) {
1114                 if (obj->phys_handle)
1115                         ret = i915_gem_phys_pwrite(obj, args, file);
1116                 else
1117                         ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1118         }
1119
1120 out:
1121         drm_gem_object_unreference(&obj->base);
1122 unlock:
1123         mutex_unlock(&dev->struct_mutex);
1124 put_rpm:
1125         intel_runtime_pm_put(dev_priv);
1126
1127         return ret;
1128 }
1129
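/* Map the current GPU reset state onto an errno: -EIO when the wait cannot
 * be restarted (non-interruptible caller) or the GPU is terminally wedged,
 * -EAGAIN while a reset is still in progress, 0 otherwise.
 */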
1130 int
1131 i915_gem_check_wedge(struct i915_gpu_error *error,
1132                      bool interruptible)
1133 {
1134         if (i915_reset_in_progress(error)) {
1135                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1136                  * -EIO unconditionally for these. */
1137                 if (!interruptible)
1138                         return -EIO;
1139
1140                 /* Recovery complete, but the reset failed ... */
1141                 if (i915_terminally_wedged(error))
1142                         return -EIO;
1143
1144                 /*
1145                  * Check if GPU Reset is in progress - we need intel_ring_begin
1146                  * to work properly to reinit the hw state while the gpu is
1147                  * still marked as reset-in-progress. Handle this with a flag.
1148                  */
1149                 if (!error->reload_in_reset)
1150                         return -EAGAIN;
1151         }
1152
1153         return 0;
1154 }
1155
1156 /*
1157  * Compare arbitrary request against outstanding lazy request. Emit on match.
1158  */
1159 int
1160 i915_gem_check_olr(struct drm_i915_gem_request *req)
1161 {
1162         int ret;
1163
1164         WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
1165
1166         ret = 0;
1167         if (req == req->ring->outstanding_lazy_request)
1168                 ret = i915_add_request(req->ring);
1169
1170         return ret;
1171 }
1172
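/* Wait helpers for __i915_wait_request(): fake_irq is a timer callback used
 * as a backstop to wake the waiting task, and missed_irq reports whether this
 * ring has been flagged as missing user interrupts.
 */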
1173 static void fake_irq(unsigned long data)
1174 {
1175         wake_up_process((struct task_struct *)data);
1176 }
1177
1178 static bool missed_irq(struct drm_i915_private *dev_priv,
1179                        struct intel_engine_cs *ring)
1180 {
1181         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1182 }
1183
1184 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1185 {
1186         if (file_priv == NULL)
1187                 return true;
1188
1189         return !atomic_xchg(&file_priv->rps_wait_boost, true);
1190 }
1191
1192 /**
1193  * __i915_wait_request - wait until execution of request has finished
1194  * @req: the request to wait upon
1195  * @reset_counter: reset sequence associated with the given request
1196  * @interruptible: do an interruptible wait (normally yes)
1197  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1198  *
1199  * Note: It is of utmost importance that the passed in seqno and reset_counter
1200  * values have been read by the caller in an smp safe manner. Where read-side
1201  * locks are involved, it is sufficient to read the reset_counter before
1202  * unlocking the lock that protects the seqno. For lockless tricks, the
1203  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1204  * inserted.
1205  *
1206  * Returns 0 if the request was found within the allotted time. Else returns the
1207  * errno with remaining time filled in timeout argument.
1208  */
1209 int __i915_wait_request(struct drm_i915_gem_request *req,
1210                         unsigned reset_counter,
1211                         bool interruptible,
1212                         s64 *timeout,
1213                         struct drm_i915_file_private *file_priv)
1214 {
1215         struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
1216         struct drm_device *dev = ring->dev;
1217         struct drm_i915_private *dev_priv = dev->dev_private;
1218         const bool irq_test_in_progress =
1219                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1220         DEFINE_WAIT(wait);
1221         unsigned long timeout_expire;
1222         s64 before, now;
1223         int ret;
1224
1225         WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1226
1227         if (i915_gem_request_completed(req, true))
1228                 return 0;
1229
1230         timeout_expire = timeout ?
1231                 jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
1232
1233         if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1234                 gen6_rps_boost(dev_priv);
1235                 if (file_priv)
1236                         mod_delayed_work(dev_priv->wq,
1237                                          &file_priv->mm.idle_work,
1238                                          msecs_to_jiffies(100));
1239         }
1240
1241         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1242                 return -ENODEV;
1243
1244         /* Record current time in case interrupted by signal, or wedged */
1245         trace_i915_gem_request_wait_begin(req);
1246         before = ktime_get_raw_ns();
1247         for (;;) {
1248                 struct timer_list timer;
1249
1250                 prepare_to_wait(&ring->irq_queue, &wait,
1251                                 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1252
1253                 /* We need to check whether any gpu reset happened in between
1254                  * the caller grabbing the seqno and now ... */
1255                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1256                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1257              * is truly gone. */
1258                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1259                         if (ret == 0)
1260                                 ret = -EAGAIN;
1261                         break;
1262                 }
1263
1264                 if (i915_gem_request_completed(req, false)) {
1265                         ret = 0;
1266                         break;
1267                 }
1268
1269                 if (interruptible && signal_pending(current)) {
1270                         ret = -ERESTARTSYS;
1271                         break;
1272                 }
1273
1274                 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1275                         ret = -ETIME;
1276                         break;
1277                 }
1278
1279                 timer.function = NULL;
1280                 if (timeout || missed_irq(dev_priv, ring)) {
1281                         unsigned long expire;
1282
1283                         setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1284                         expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1285                         mod_timer(&timer, expire);
1286                 }
1287
1288                 io_schedule();
1289
1290                 if (timer.function) {
1291                         del_singleshot_timer_sync(&timer);
1292                         destroy_timer_on_stack(&timer);
1293                 }
1294         }
1295         now = ktime_get_raw_ns();
1296         trace_i915_gem_request_wait_end(req);
1297
1298         if (!irq_test_in_progress)
1299                 ring->irq_put(ring);
1300
1301         finish_wait(&ring->irq_queue, &wait);
1302
1303         if (timeout) {
1304                 s64 tres = *timeout - (now - before);
1305
1306                 *timeout = tres < 0 ? 0 : tres;
1307
1308                 /*
1309                  * Apparently ktime isn't accurate enough and occasionally has a
1310                  * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1311                  * things up to make the test happy. We allow up to 1 jiffy.
1312                  *
1313                  * This is a regression from the timespec->ktime conversion.
1314                  */
1315                 if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1316                         *timeout = 0;
1317         }
1318
1319         return ret;
1320 }
1321
1322 /**
1323  * Waits for a request to be signaled, and cleans up the
1324  * request and object lists appropriately for that event.
1325  */
1326 int
1327 i915_wait_request(struct drm_i915_gem_request *req)
1328 {
1329         struct drm_device *dev;
1330         struct drm_i915_private *dev_priv;
1331         bool interruptible;
1332         unsigned reset_counter;
1333         int ret;
1334
1335         BUG_ON(req == NULL);
1336
1337         dev = req->ring->dev;
1338         dev_priv = dev->dev_private;
1339         interruptible = dev_priv->mm.interruptible;
1340
1341         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1342
1343         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1344         if (ret)
1345                 return ret;
1346
1347         ret = i915_gem_check_olr(req);
1348         if (ret)
1349                 return ret;
1350
1351         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1352         i915_gem_request_reference(req);
1353         ret = __i915_wait_request(req, reset_counter,
1354                                   interruptible, NULL, NULL);
1355         i915_gem_request_unreference(req);
1356         return ret;
1357 }
1358
1359 static int
1360 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
1361 {
1362         if (!obj->active)
1363                 return 0;
1364
1365         /* Manually manage the write flush as we may have not yet
1366          * retired the buffer.
1367          *
1368          * Note that the last_write_req is always the earlier of
1369  * the two (read/write) requests, so if we have successfully waited,
1370          * we know we have passed the last write.
1371          */
1372         i915_gem_request_assign(&obj->last_write_req, NULL);
1373
1374         return 0;
1375 }
1376
1377 /**
1378  * Ensures that all rendering to the object has completed and the object is
1379  * safe to unbind from the GTT or access from the CPU.
1380  */
1381 static __must_check int
1382 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1383                                bool readonly)
1384 {
1385         struct drm_i915_gem_request *req;
1386         int ret;
1387
1388         req = readonly ? obj->last_write_req : obj->last_read_req;
1389         if (!req)
1390                 return 0;
1391
1392         ret = i915_wait_request(req);
1393         if (ret)
1394                 return ret;
1395
1396         return i915_gem_object_wait_rendering__tail(obj);
1397 }
1398
1399 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1400  * as the object state may change during this call.
1401  */
1402 static __must_check int
1403 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1404                                             struct drm_i915_file_private *file_priv,
1405                                             bool readonly)
1406 {
1407         struct drm_i915_gem_request *req;
1408         struct drm_device *dev = obj->base.dev;
1409         struct drm_i915_private *dev_priv = dev->dev_private;
1410         unsigned reset_counter;
1411         int ret;
1412
1413         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1414         BUG_ON(!dev_priv->mm.interruptible);
1415
1416         req = readonly ? obj->last_write_req : obj->last_read_req;
1417         if (!req)
1418                 return 0;
1419
1420         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1421         if (ret)
1422                 return ret;
1423
1424         ret = i915_gem_check_olr(req);
1425         if (ret)
1426                 return ret;
1427
1428         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1429         i915_gem_request_reference(req);
1430         mutex_unlock(&dev->struct_mutex);
1431         ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
1432         mutex_lock(&dev->struct_mutex);
1433         i915_gem_request_unreference(req);
1434         if (ret)
1435                 return ret;
1436
1437         return i915_gem_object_wait_rendering__tail(obj);
1438 }
1439
1440 /**
1441  * Called when user space prepares to use an object with the CPU, either
1442  * through the mmap ioctl's mapping or a GTT mapping.
1443  */
1444 int
1445 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1446                           struct drm_file *file)
1447 {
1448         struct drm_i915_gem_set_domain *args = data;
1449         struct drm_i915_gem_object *obj;
1450         uint32_t read_domains = args->read_domains;
1451         uint32_t write_domain = args->write_domain;
1452         int ret;
1453
1454         /* Only handle setting domains to types used by the CPU. */
1455         if (write_domain & I915_GEM_GPU_DOMAINS)
1456                 return -EINVAL;
1457
1458         if (read_domains & I915_GEM_GPU_DOMAINS)
1459                 return -EINVAL;
1460
1461         /* Having something in the write domain implies it's in the read
1462          * domain, and only that read domain.  Enforce that in the request.
1463          */
1464         if (write_domain != 0 && read_domains != write_domain)
1465                 return -EINVAL;
1466
1467         ret = i915_mutex_lock_interruptible(dev);
1468         if (ret)
1469                 return ret;
1470
1471         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1472         if (&obj->base == NULL) {
1473                 ret = -ENOENT;
1474                 goto unlock;
1475         }
1476
1477         /* Try to flush the object off the GPU without holding the lock.
1478          * We will repeat the flush holding the lock in the normal manner
1479          * to catch cases where we are gazumped.
1480          */
1481         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1482                                                           file->driver_priv,
1483                                                           !write_domain);
1484         if (ret)
1485                 goto unref;
1486
1487         if (read_domains & I915_GEM_DOMAIN_GTT)
1488                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1489         else
1490                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1491
1492 unref:
1493         drm_gem_object_unreference(&obj->base);
1494 unlock:
1495         mutex_unlock(&dev->struct_mutex);
1496         return ret;
1497 }
1498
1499 /**
1500  * Called when user space has done writes to this buffer
1501  */
1502 int
1503 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1504                          struct drm_file *file)
1505 {
1506         struct drm_i915_gem_sw_finish *args = data;
1507         struct drm_i915_gem_object *obj;
1508         int ret = 0;
1509
1510         ret = i915_mutex_lock_interruptible(dev);
1511         if (ret)
1512                 return ret;
1513
1514         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1515         if (&obj->base == NULL) {
1516                 ret = -ENOENT;
1517                 goto unlock;
1518         }
1519
1520         /* Pinned buffers may be scanout, so flush the cache */
1521         if (obj->pin_display)
1522                 i915_gem_object_flush_cpu_write_domain(obj);
1523
1524         drm_gem_object_unreference(&obj->base);
1525 unlock:
1526         mutex_unlock(&dev->struct_mutex);
1527         return ret;
1528 }
1529
1530 /**
1531  * Maps the contents of an object, returning the address it is mapped
1532  * into.
1533  *
1534  * While the mapping holds a reference on the contents of the object, it doesn't
1535  * imply a ref on the object itself.
1536  *
1537  * IMPORTANT:
1538  *
1539  * DRM driver writers who look at this function as an example for how to do GEM
1540  * mmap support, please don't implement mmap support like this. The modern way
1541  * to implement DRM mmap support is with an mmap offset ioctl (like
1542  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1543  * That way debug tooling like valgrind will understand what's going on; hiding
1544  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1545  * does cpu mmaps this way because we didn't know better (see the sketch below).
1546  */
1547 int
1548 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1549                     struct drm_file *file)
1550 {
1551         struct drm_i915_gem_mmap *args = data;
1552         struct drm_gem_object *obj;
1553         unsigned long addr;
1554
1555         if (args->flags & ~(I915_MMAP_WC))
1556                 return -EINVAL;
1557
1558         if (args->flags & I915_MMAP_WC && !cpu_has_pat)
1559                 return -ENODEV;
1560
1561         obj = drm_gem_object_lookup(dev, file, args->handle);
1562         if (obj == NULL)
1563                 return -ENOENT;
1564
1565         /* prime objects have no backing filp to GEM mmap
1566          * pages from.
1567          */
1568         if (!obj->filp) {
1569                 drm_gem_object_unreference_unlocked(obj);
1570                 return -EINVAL;
1571         }
1572
1573         addr = vm_mmap(obj->filp, 0, args->size,
1574                        PROT_READ | PROT_WRITE, MAP_SHARED,
1575                        args->offset);
1576         if (args->flags & I915_MMAP_WC) {
1577                 struct mm_struct *mm = current->mm;
1578                 struct vm_area_struct *vma;
1579
1580                 down_write(&mm->mmap_sem);
1581                 vma = find_vma(mm, addr);
1582                 if (vma)
1583                         vma->vm_page_prot =
1584                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1585                 else
1586                         addr = -ENOMEM;
1587                 up_write(&mm->mmap_sem);
1588         }
1589         drm_gem_object_unreference_unlocked(obj);
1590         if (IS_ERR((void *)addr))
1591                 return addr;
1592
1593         args->addr_ptr = (uint64_t) addr;
1594
1595         return 0;
1596 }
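
/*
 * Sketch of the recommended mmap-offset flow mentioned above (illustrative
 * userspace, not part of the driver). It assumes an open DRM fd, a GEM
 * handle and the object size; the ioctl returns a fake offset that is then
 * passed to a regular mmap() on the DRM fd itself.
 *
 *	struct drm_i915_gem_mmap_gtt mm = { .handle = handle };
 *	void *ptr;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mm))
 *		perror("mmap_gtt");
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mm.offset);
 *
 * Faults on that mapping are then serviced by i915_gem_fault() below.
 */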
1597
1598 /**
1599  * i915_gem_fault - fault a page into the GTT
1600  * @vma: VMA in question
1601  * @vmf: fault info
1602  *
1603  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1604  * from userspace.  The fault handler takes care of binding the object to
1605  * the GTT (if needed), allocating and programming a fence register (again,
1606  * only if needed based on whether the old reg is still valid or the object
1607  * is tiled) and inserting a new PTE into the faulting process.
1608  *
1609  * Note that the faulting process may involve evicting existing objects
1610  * from the GTT and/or fence registers to make room.  So performance may
1611  * suffer if the GTT working set is large or there are few fence registers
1612  * left.
1613  */
1614 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1615 {
1616         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1617         struct drm_device *dev = obj->base.dev;
1618         struct drm_i915_private *dev_priv = dev->dev_private;
1619         pgoff_t page_offset;
1620         unsigned long pfn;
1621         int ret = 0;
1622         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1623
1624         intel_runtime_pm_get(dev_priv);
1625
1626         /* We don't use vmf->pgoff since that has the fake offset */
1627         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1628                 PAGE_SHIFT;
1629
1630         ret = i915_mutex_lock_interruptible(dev);
1631         if (ret)
1632                 goto out;
1633
1634         trace_i915_gem_object_fault(obj, page_offset, true, write);
1635
1636         /* Try to flush the object off the GPU first without holding the lock.
1637          * Upon reacquiring the lock, we will perform our sanity checks and then
1638          * repeat the flush holding the lock in the normal manner to catch cases
1639          * where we are gazumped.
1640          */
1641         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1642         if (ret)
1643                 goto unlock;
1644
1645         /* Access to snoopable pages through the GTT is incoherent. */
1646         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1647                 ret = -EFAULT;
1648                 goto unlock;
1649         }
1650
1651         /* Now bind it into the GTT if needed */
1652         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1653         if (ret)
1654                 goto unlock;
1655
1656         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1657         if (ret)
1658                 goto unpin;
1659
1660         ret = i915_gem_object_get_fence(obj);
1661         if (ret)
1662                 goto unpin;
1663
1664         /* Finally, remap it using the new GTT offset */
1665         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1666         pfn >>= PAGE_SHIFT;
1667
1668         if (!obj->fault_mappable) {
1669                 unsigned long size = min_t(unsigned long,
1670                                            vma->vm_end - vma->vm_start,
1671                                            obj->base.size);
1672                 int i;
1673
1674                 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1675                         ret = vm_insert_pfn(vma,
1676                                             (unsigned long)vma->vm_start + i * PAGE_SIZE,
1677                                             pfn + i);
1678                         if (ret)
1679                                 break;
1680                 }
1681
1682                 obj->fault_mappable = true;
1683         } else
1684                 ret = vm_insert_pfn(vma,
1685                                     (unsigned long)vmf->virtual_address,
1686                                     pfn + page_offset);
1687 unpin:
1688         i915_gem_object_ggtt_unpin(obj);
1689 unlock:
1690         mutex_unlock(&dev->struct_mutex);
1691 out:
1692         switch (ret) {
1693         case -EIO:
1694                 /*
1695                  * We eat errors when the gpu is terminally wedged to avoid
1696                  * userspace unduly crashing (gl has no provisions for mmaps to
1697                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1698                  * and so needs to be reported.
1699                  */
1700                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1701                         ret = VM_FAULT_SIGBUS;
1702                         break;
1703                 }
1704         case -EAGAIN:
1705                 /*
1706                  * EAGAIN means the gpu is hung and we'll wait for the error
1707                  * handler to reset everything when re-faulting in
1708                  * i915_mutex_lock_interruptible.
1709                  */
1710         case 0:
1711         case -ERESTARTSYS:
1712         case -EINTR:
1713         case -EBUSY:
1714                 /*
1715                  * EBUSY is ok: this just means that another thread
1716                  * already did the job.
1717                  */
1718                 ret = VM_FAULT_NOPAGE;
1719                 break;
1720         case -ENOMEM:
1721                 ret = VM_FAULT_OOM;
1722                 break;
1723         case -ENOSPC:
1724         case -EFAULT:
1725                 ret = VM_FAULT_SIGBUS;
1726                 break;
1727         default:
1728                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1729                 ret = VM_FAULT_SIGBUS;
1730                 break;
1731         }
1732
1733         intel_runtime_pm_put(dev_priv);
1734         return ret;
1735 }
1736
1737 /**
1738  * i915_gem_release_mmap - remove physical page mappings
1739  * @obj: obj in question
1740  *
1741  * Preserve the reservation of the mmap offset with the DRM core code, but
1742  * relinquish ownership of the pages back to the system.
1743  *
1744  * It is vital that we remove the page mapping if we have mapped a tiled
1745  * object through the GTT and then lose the fence register due to
1746  * resource pressure. Similarly if the object has been moved out of the
1747  * aperture, then pages mapped into userspace must be revoked. Removing the
1748  * mapping will then trigger a page fault on the next user access, allowing
1749  * fixup by i915_gem_fault().
1750  */
1751 void
1752 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1753 {
1754         if (!obj->fault_mappable)
1755                 return;
1756
1757         drm_vma_node_unmap(&obj->base.vma_node,
1758                            obj->base.dev->anon_inode->i_mapping);
1759         obj->fault_mappable = false;
1760 }
1761
1762 void
1763 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1764 {
1765         struct drm_i915_gem_object *obj;
1766
1767         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1768                 i915_gem_release_mmap(obj);
1769 }
1770
1771 uint32_t
1772 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1773 {
1774         uint32_t gtt_size;
1775
1776         if (INTEL_INFO(dev)->gen >= 4 ||
1777             tiling_mode == I915_TILING_NONE)
1778                 return size;
1779
1780         /* Previous chips need a power-of-two fence region when tiling */
1781         if (INTEL_INFO(dev)->gen == 3)
1782                 gtt_size = 1024*1024;
1783         else
1784                 gtt_size = 512*1024;
1785
1786         while (gtt_size < size)
1787                 gtt_size <<= 1;
1788
1789         return gtt_size;
1790 }
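
/*
 * Worked example of the rounding above (illustrative numbers): a 1.5MiB
 * X-tiled object on gen3 starts from the 1MiB minimum and doubles once to a
 * 2MiB fence region; the same object on gen2 starts from 512KiB and doubles
 * twice to 2MiB. On gen4+ or for untiled objects the size is returned
 * unchanged.
 */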
1791
1792 /**
1793  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1794  * @obj: object to check
1795  *
1796  * Return the required GTT alignment for an object, taking into account
1797  * potential fence register mapping.
1798  */
1799 uint32_t
1800 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1801                            int tiling_mode, bool fenced)
1802 {
1803         /*
1804          * Minimum alignment is 4k (GTT page size), but might be greater
1805          * if a fence register is needed for the object.
1806          */
1807         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1808             tiling_mode == I915_TILING_NONE)
1809                 return 4096;
1810
1811         /*
1812          * Previous chips need to be aligned to the size of the smallest
1813          * fence register that can contain the object.
1814          */
1815         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1816 }
1817
1818 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1819 {
1820         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1821         int ret;
1822
1823         if (drm_vma_node_has_offset(&obj->base.vma_node))
1824                 return 0;
1825
1826         dev_priv->mm.shrinker_no_lock_stealing = true;
1827
1828         ret = drm_gem_create_mmap_offset(&obj->base);
1829         if (ret != -ENOSPC)
1830                 goto out;
1831
1832         /* Badly fragmented mmap space? The only way we can recover
1833          * space is by destroying unwanted objects. We can't randomly release
1834          * mmap_offsets as userspace expects them to be persistent for the
1835          * lifetime of the objects. The closest we can do is to release the
1836          * offsets on purgeable objects by truncating them and marking them purged,
1837          * which prevents userspace from ever using that object again.
1838          */
1839         i915_gem_shrink(dev_priv,
1840                         obj->base.size >> PAGE_SHIFT,
1841                         I915_SHRINK_BOUND |
1842                         I915_SHRINK_UNBOUND |
1843                         I915_SHRINK_PURGEABLE);
1844         ret = drm_gem_create_mmap_offset(&obj->base);
1845         if (ret != -ENOSPC)
1846                 goto out;
1847
1848         i915_gem_shrink_all(dev_priv);
1849         ret = drm_gem_create_mmap_offset(&obj->base);
1850 out:
1851         dev_priv->mm.shrinker_no_lock_stealing = false;
1852
1853         return ret;
1854 }
1855
1856 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1857 {
1858         drm_gem_free_mmap_offset(&obj->base);
1859 }
1860
1861 int
1862 i915_gem_mmap_gtt(struct drm_file *file,
1863                   struct drm_device *dev,
1864                   uint32_t handle,
1865                   uint64_t *offset)
1866 {
1867         struct drm_i915_private *dev_priv = dev->dev_private;
1868         struct drm_i915_gem_object *obj;
1869         int ret;
1870
1871         ret = i915_mutex_lock_interruptible(dev);
1872         if (ret)
1873                 return ret;
1874
1875         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1876         if (&obj->base == NULL) {
1877                 ret = -ENOENT;
1878                 goto unlock;
1879         }
1880
1881         if (obj->base.size > dev_priv->gtt.mappable_end) {
1882                 ret = -E2BIG;
1883                 goto out;
1884         }
1885
1886         if (obj->madv != I915_MADV_WILLNEED) {
1887                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1888                 ret = -EFAULT;
1889                 goto out;
1890         }
1891
1892         ret = i915_gem_object_create_mmap_offset(obj);
1893         if (ret)
1894                 goto out;
1895
1896         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1897
1898 out:
1899         drm_gem_object_unreference(&obj->base);
1900 unlock:
1901         mutex_unlock(&dev->struct_mutex);
1902         return ret;
1903 }
1904
1905 /**
1906  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1907  * @dev: DRM device
1908  * @data: GTT mapping ioctl data
1909  * @file: GEM object info
1910  *
1911  * Simply returns the fake offset to userspace so it can mmap it.
1912  * The mmap call will end up in drm_gem_mmap(), which will set things
1913  * up so we can get faults in the handler above.
1914  *
1915  * The fault handler will take care of binding the object into the GTT
1916  * (since it may have been evicted to make room for something), allocating
1917  * a fence register, and mapping the appropriate aperture address into
1918  * userspace.
1919  */
1920 int
1921 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1922                         struct drm_file *file)
1923 {
1924         struct drm_i915_gem_mmap_gtt *args = data;
1925
1926         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1927 }
1928
1929 /* Immediately discard the backing storage */
1930 static void
1931 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1932 {
1933         i915_gem_object_free_mmap_offset(obj);
1934
1935         if (obj->base.filp == NULL)
1936                 return;
1937
1938         /* Our goal here is to return as much of the memory as
1939          * possible back to the system, as we are called from the OOM path.
1940          * To do this we must instruct the shmfs to drop all of its
1941          * backing pages, *now*.
1942          */
1943         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1944         obj->madv = __I915_MADV_PURGED;
1945 }
1946
1947 /* Try to discard unwanted pages */
1948 static void
1949 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1950 {
1951         struct address_space *mapping;
1952
1953         switch (obj->madv) {
1954         case I915_MADV_DONTNEED:
1955                 i915_gem_object_truncate(obj);
1956         case __I915_MADV_PURGED:
1957                 return;
1958         }
1959
1960         if (obj->base.filp == NULL)
1961                 return;
1962
1963         mapping = file_inode(obj->base.filp)->i_mapping,
1964         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1965 }
1966
1967 static void
1968 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1969 {
1970         struct sg_page_iter sg_iter;
1971         int ret;
1972
1973         BUG_ON(obj->madv == __I915_MADV_PURGED);
1974
1975         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1976         if (ret) {
1977                 /* In the event of a disaster, abandon all caches and
1978                  * hope for the best.
1979                  */
1980                 WARN_ON(ret != -EIO);
1981                 i915_gem_clflush_object(obj, true);
1982                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1983         }
1984
1985         if (i915_gem_object_needs_bit17_swizzle(obj))
1986                 i915_gem_object_save_bit_17_swizzle(obj);
1987
1988         if (obj->madv == I915_MADV_DONTNEED)
1989                 obj->dirty = 0;
1990
1991         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1992                 struct page *page = sg_page_iter_page(&sg_iter);
1993
1994                 if (obj->dirty)
1995                         set_page_dirty(page);
1996
1997                 if (obj->madv == I915_MADV_WILLNEED)
1998                         mark_page_accessed(page);
1999
2000                 page_cache_release(page);
2001         }
2002         obj->dirty = 0;
2003
2004         sg_free_table(obj->pages);
2005         kfree(obj->pages);
2006 }
2007
2008 int
2009 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2010 {
2011         const struct drm_i915_gem_object_ops *ops = obj->ops;
2012
2013         if (obj->pages == NULL)
2014                 return 0;
2015
2016         if (obj->pages_pin_count)
2017                 return -EBUSY;
2018
2019         BUG_ON(i915_gem_obj_bound_any(obj));
2020
2021         /* ->put_pages might need to allocate memory for the bit17 swizzle
2022          * array, hence protect the pages from being reaped by removing the
2023          * object from the gtt lists early. */
2024         list_del(&obj->global_list);
2025
2026         ops->put_pages(obj);
2027         obj->pages = NULL;
2028
2029         i915_gem_object_invalidate(obj);
2030
2031         return 0;
2032 }
2033
2034 static int
2035 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2036 {
2037         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2038         int page_count, i;
2039         struct address_space *mapping;
2040         struct sg_table *st;
2041         struct scatterlist *sg;
2042         struct sg_page_iter sg_iter;
2043         struct page *page;
2044         unsigned long last_pfn = 0;     /* suppress gcc warning */
2045         gfp_t gfp;
2046
2047         /* Assert that the object is not currently in any GPU domain. As it
2048          * wasn't in the GTT, there shouldn't be any way it could have been in
2049          * a GPU cache
2050          */
2051         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2052         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2053
2054         st = kmalloc(sizeof(*st), GFP_KERNEL);
2055         if (st == NULL)
2056                 return -ENOMEM;
2057
2058         page_count = obj->base.size / PAGE_SIZE;
2059         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2060                 kfree(st);
2061                 return -ENOMEM;
2062         }
2063
2064         /* Get the list of pages out of our struct file.  They'll be pinned
2065          * at this point until we release them.
2066          *
2067          * Fail silently without starting the shrinker
2068          */
2069         mapping = file_inode(obj->base.filp)->i_mapping;
2070         gfp = mapping_gfp_mask(mapping);
2071         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2072         gfp &= ~(__GFP_IO | __GFP_WAIT);
2073         sg = st->sgl;
2074         st->nents = 0;
2075         for (i = 0; i < page_count; i++) {
2076                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2077                 if (IS_ERR(page)) {
2078                         i915_gem_shrink(dev_priv,
2079                                         page_count,
2080                                         I915_SHRINK_BOUND |
2081                                         I915_SHRINK_UNBOUND |
2082                                         I915_SHRINK_PURGEABLE);
2083                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2084                 }
2085                 if (IS_ERR(page)) {
2086                         /* We've tried hard to allocate the memory by reaping
2087                          * our own buffers; now let the real VM do its job and
2088                          * go down in flames if we are truly OOM.
2089                          */
2090                         i915_gem_shrink_all(dev_priv);
2091                         page = shmem_read_mapping_page(mapping, i);
2092                         if (IS_ERR(page))
2093                                 goto err_pages;
2094                 }
2095 #ifdef CONFIG_SWIOTLB
2096                 if (swiotlb_nr_tbl()) {
2097                         st->nents++;
2098                         sg_set_page(sg, page, PAGE_SIZE, 0);
2099                         sg = sg_next(sg);
2100                         continue;
2101                 }
2102 #endif
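                /* Coalesce physically contiguous pages into a single sg
                 * entry: if this page directly follows the previous one,
                 * just grow the previous entry by PAGE_SIZE instead of
                 * starting a new one (not done under SWIOTLB, see above,
                 * where each page must stay in its own entry).
                 */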
2103                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2104                         if (i)
2105                                 sg = sg_next(sg);
2106                         st->nents++;
2107                         sg_set_page(sg, page, PAGE_SIZE, 0);
2108                 } else {
2109                         sg->length += PAGE_SIZE;
2110                 }
2111                 last_pfn = page_to_pfn(page);
2112
2113                 /* Check that the i965g/gm workaround works. */
2114                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2115         }
2116 #ifdef CONFIG_SWIOTLB
2117         if (!swiotlb_nr_tbl())
2118 #endif
2119                 sg_mark_end(sg);
2120         obj->pages = st;
2121
2122         if (i915_gem_object_needs_bit17_swizzle(obj))
2123                 i915_gem_object_do_bit_17_swizzle(obj);
2124
2125         if (obj->tiling_mode != I915_TILING_NONE &&
2126             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2127                 i915_gem_object_pin_pages(obj);
2128
2129         return 0;
2130
2131 err_pages:
2132         sg_mark_end(sg);
2133         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2134                 page_cache_release(sg_page_iter_page(&sg_iter));
2135         sg_free_table(st);
2136         kfree(st);
2137
2138         /* shmemfs first checks if there is enough memory to allocate the page
2139          * and reports ENOSPC if there is not enough, along with the usual
2140          * ENOMEM for a genuine allocation failure.
2141          *
2142          * We use ENOSPC in our driver to mean that we have run out of aperture
2143          * space and so want to translate the error from shmemfs back to our
2144          * usual understanding of ENOMEM.
2145          */
2146         if (PTR_ERR(page) == -ENOSPC)
2147                 return -ENOMEM;
2148         else
2149                 return PTR_ERR(page);
2150 }
2151
2152 /* Ensure that the associated pages are gathered from the backing storage
2153  * and pinned into our object. i915_gem_object_get_pages() may be called
2154  * multiple times before they are released by a single call to
2155  * i915_gem_object_put_pages() - once the pages are no longer referenced
2156  * either as a result of memory pressure (reaping pages under the shrinker)
2157  * or as the object is itself released.
2158  */
2159 int
2160 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2161 {
2162         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2163         const struct drm_i915_gem_object_ops *ops = obj->ops;
2164         int ret;
2165
2166         if (obj->pages)
2167                 return 0;
2168
2169         if (obj->madv != I915_MADV_WILLNEED) {
2170                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2171                 return -EFAULT;
2172         }
2173
2174         BUG_ON(obj->pages_pin_count);
2175
2176         ret = ops->get_pages(obj);
2177         if (ret)
2178                 return ret;
2179
2180         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2181         return 0;
2182 }
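
/*
 * A rough sketch of the typical caller pattern (illustrative, error handling
 * elided): callers that need the backing pages to stay resident pair
 * get_pages with a pin, and drop the pin when done.
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... access obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 */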
2183
2184 static void
2185 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2186                                struct intel_engine_cs *ring)
2187 {
2188         struct drm_i915_gem_request *req;
2189         struct intel_engine_cs *old_ring;
2190
2191         BUG_ON(ring == NULL);
2192
2193         req = intel_ring_get_request(ring);
2194         old_ring = i915_gem_request_get_ring(obj->last_read_req);
2195
2196         if (old_ring != ring && obj->last_write_req) {
2197                 /* Keep the request relative to the current ring */
2198                 i915_gem_request_assign(&obj->last_write_req, req);
2199         }
2200
2201         /* Add a reference if we're newly entering the active list. */
2202         if (!obj->active) {
2203                 drm_gem_object_reference(&obj->base);
2204                 obj->active = 1;
2205         }
2206
2207         list_move_tail(&obj->ring_list, &ring->active_list);
2208
2209         i915_gem_request_assign(&obj->last_read_req, req);
2210 }
2211
2212 void i915_vma_move_to_active(struct i915_vma *vma,
2213                              struct intel_engine_cs *ring)
2214 {
2215         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2216         return i915_gem_object_move_to_active(vma->obj, ring);
2217 }
2218
2219 static void
2220 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2221 {
2222         struct i915_vma *vma;
2223
2224         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2225         BUG_ON(!obj->active);
2226
2227         list_for_each_entry(vma, &obj->vma_list, vma_link) {
2228                 if (!list_empty(&vma->mm_list))
2229                         list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
2230         }
2231
2232         intel_fb_obj_flush(obj, true);
2233
2234         list_del_init(&obj->ring_list);
2235
2236         i915_gem_request_assign(&obj->last_read_req, NULL);
2237         i915_gem_request_assign(&obj->last_write_req, NULL);
2238         obj->base.write_domain = 0;
2239
2240         i915_gem_request_assign(&obj->last_fenced_req, NULL);
2241
2242         obj->active = 0;
2243         drm_gem_object_unreference(&obj->base);
2244
2245         WARN_ON(i915_verify_lists(dev));
2246 }
2247
2248 static void
2249 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2250 {
2251         if (obj->last_read_req == NULL)
2252                 return;
2253
2254         if (i915_gem_request_completed(obj->last_read_req, true))
2255                 i915_gem_object_move_to_inactive(obj);
2256 }
2257
2258 static int
2259 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2260 {
2261         struct drm_i915_private *dev_priv = dev->dev_private;
2262         struct intel_engine_cs *ring;
2263         int ret, i, j;
2264
2265         /* Carefully retire all requests without writing to the rings */
2266         for_each_ring(ring, dev_priv, i) {
2267                 ret = intel_ring_idle(ring);
2268                 if (ret)
2269                         return ret;
2270         }
2271         i915_gem_retire_requests(dev);
2272
2273         /* Finally reset hw state */
2274         for_each_ring(ring, dev_priv, i) {
2275                 intel_ring_init_seqno(ring, seqno);
2276
2277                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2278                         ring->semaphore.sync_seqno[j] = 0;
2279         }
2280
2281         return 0;
2282 }
2283
2284 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2285 {
2286         struct drm_i915_private *dev_priv = dev->dev_private;
2287         int ret;
2288
2289         if (seqno == 0)
2290                 return -EINVAL;
2291
2292         /* The HWS page seqno needs to be set to less than
2293          * the value we will inject into the ring.
2294          */
2295         ret = i915_gem_init_seqno(dev, seqno - 1);
2296         if (ret)
2297                 return ret;
2298
2299         /* Carefully set the last_seqno value so that wrap
2300          * detection still works
2301          */
2302         dev_priv->next_seqno = seqno;
2303         dev_priv->last_seqno = seqno - 1;
2304         if (dev_priv->last_seqno == 0)
2305                 dev_priv->last_seqno--;
2306
2307         return 0;
2308 }
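
/*
 * Worked example (illustrative values): i915_gem_set_seqno(dev, 0x100) first
 * idles the rings and seeds their status pages with 0xff via
 * i915_gem_init_seqno(), then sets next_seqno = 0x100 and last_seqno = 0xff.
 * The next call to i915_gem_get_seqno() hands out 0x100, keeping the hardware
 * seqno strictly below what will be injected and keeping the last_seqno
 * bookkeeping that wrap detection relies on consistent.
 */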
2309
2310 int
2311 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2312 {
2313         struct drm_i915_private *dev_priv = dev->dev_private;
2314
2315         /* reserve 0 for non-seqno */
2316         if (dev_priv->next_seqno == 0) {
2317                 int ret = i915_gem_init_seqno(dev, 0);
2318                 if (ret)
2319                         return ret;
2320
2321                 dev_priv->next_seqno = 1;
2322         }
2323
2324         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2325         return 0;
2326 }
2327
2328 int __i915_add_request(struct intel_engine_cs *ring,
2329                        struct drm_file *file,
2330                        struct drm_i915_gem_object *obj)
2331 {
2332         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2333         struct drm_i915_gem_request *request;
2334         struct intel_ringbuffer *ringbuf;
2335         u32 request_start;
2336         int ret;
2337
2338         request = ring->outstanding_lazy_request;
2339         if (WARN_ON(request == NULL))
2340                 return -ENOMEM;
2341
2342         if (i915.enable_execlists) {
2343                 ringbuf = request->ctx->engine[ring->id].ringbuf;
2344         } else
2345                 ringbuf = ring->buffer;
2346
2347         request_start = intel_ring_get_tail(ringbuf);
2348         /*
2349          * Emit any outstanding flushes - execbuf can fail to emit the flush
2350          * after having emitted the batchbuffer command. Hence we need to fix
2351          * things up similar to emitting the lazy request. The difference here
2352          * is that the flush _must_ happen before the next request, no matter
2353          * what.
2354          */
2355         if (i915.enable_execlists) {
2356                 ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
2357                 if (ret)
2358                         return ret;
2359         } else {
2360                 ret = intel_ring_flush_all_caches(ring);
2361                 if (ret)
2362                         return ret;
2363         }
2364
2365         /* Record the position of the start of the request so that
2366          * should we detect the updated seqno part-way through the
2367          * GPU processing the request, we never over-estimate the
2368          * position of the head.
2369          */
2370         request->postfix = intel_ring_get_tail(ringbuf);
2371
2372         if (i915.enable_execlists) {
2373                 ret = ring->emit_request(ringbuf, request);
2374                 if (ret)
2375                         return ret;
2376         } else {
2377                 ret = ring->add_request(ring);
2378                 if (ret)
2379                         return ret;
2380
2381                 request->tail = intel_ring_get_tail(ringbuf);
2382         }
2383
2384         request->head = request_start;
2385
2386         /* Whilst this request exists, batch_obj will be on the
2387          * active_list, and so will hold the active reference. Only when this
2388          * request is retired will the batch_obj be moved onto the
2389          * inactive_list and lose its active reference. Hence we do not need
2390          * to explicitly hold another reference here.
2391          */
2392         request->batch_obj = obj;
2393
2394         if (!i915.enable_execlists) {
2395                 /* Hold a reference to the current context so that we can inspect
2396                  * it later in case a hangcheck error event fires.
2397                  */
2398                 request->ctx = ring->last_context;
2399                 if (request->ctx)
2400                         i915_gem_context_reference(request->ctx);
2401         }
2402
2403         request->emitted_jiffies = jiffies;
2404         ring->last_submitted_seqno = request->seqno;
2405         list_add_tail(&request->list, &ring->request_list);
2406         request->file_priv = NULL;
2407
2408         if (file) {
2409                 struct drm_i915_file_private *file_priv = file->driver_priv;
2410
2411                 spin_lock(&file_priv->mm.lock);
2412                 request->file_priv = file_priv;
2413                 list_add_tail(&request->client_list,
2414                               &file_priv->mm.request_list);
2415                 spin_unlock(&file_priv->mm.lock);
2416
2417                 request->pid = get_pid(task_pid(current));
2418         }
2419
2420         trace_i915_gem_request_add(request);
2421         ring->outstanding_lazy_request = NULL;
2422
2423         i915_queue_hangcheck(ring->dev);
2424
2425         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2426         queue_delayed_work(dev_priv->wq,
2427                            &dev_priv->mm.retire_work,
2428                            round_jiffies_up_relative(HZ));
2429         intel_mark_busy(dev_priv->dev);
2430
2431         return 0;
2432 }
2433
2434 static inline void
2435 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2436 {
2437         struct drm_i915_file_private *file_priv = request->file_priv;
2438
2439         if (!file_priv)
2440                 return;
2441
2442         spin_lock(&file_priv->mm.lock);
2443         list_del(&request->client_list);
2444         request->file_priv = NULL;
2445         spin_unlock(&file_priv->mm.lock);
2446 }
2447
2448 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2449                                    const struct intel_context *ctx)
2450 {
2451         unsigned long elapsed;
2452
2453         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2454
2455         if (ctx->hang_stats.banned)
2456                 return true;
2457
2458         if (ctx->hang_stats.ban_period_seconds &&
2459             elapsed <= ctx->hang_stats.ban_period_seconds) {
2460                 if (!i915_gem_context_is_default(ctx)) {
2461                         DRM_DEBUG("context hanging too fast, banning!\n");
2462                         return true;
2463                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2464                         if (i915_stop_ring_allow_warn(dev_priv))
2465                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2466                         return true;
2467                 }
2468         }
2469
2470         return false;
2471 }
2472
2473 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2474                                   struct intel_context *ctx,
2475                                   const bool guilty)
2476 {
2477         struct i915_ctx_hang_stats *hs;
2478
2479         if (WARN_ON(!ctx))
2480                 return;
2481
2482         hs = &ctx->hang_stats;
2483
2484         if (guilty) {
2485                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2486                 hs->batch_active++;
2487                 hs->guilty_ts = get_seconds();
2488         } else {
2489                 hs->batch_pending++;
2490         }
2491 }
2492
2493 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2494 {
2495         list_del(&request->list);
2496         i915_gem_request_remove_from_client(request);
2497
2498         put_pid(request->pid);
2499
2500         i915_gem_request_unreference(request);
2501 }
2502
2503 void i915_gem_request_free(struct kref *req_ref)
2504 {
2505         struct drm_i915_gem_request *req = container_of(req_ref,
2506                                                  typeof(*req), ref);
2507         struct intel_context *ctx = req->ctx;
2508
2509         if (ctx) {
2510                 if (i915.enable_execlists) {
2511                         struct intel_engine_cs *ring = req->ring;
2512
2513                         if (ctx != ring->default_context)
2514                                 intel_lr_context_unpin(ring, ctx);
2515                 }
2516
2517                 i915_gem_context_unreference(ctx);
2518         }
2519
2520         kfree(req);
2521 }
2522
2523 struct drm_i915_gem_request *
2524 i915_gem_find_active_request(struct intel_engine_cs *ring)
2525 {
2526         struct drm_i915_gem_request *request;
2527
2528         list_for_each_entry(request, &ring->request_list, list) {
2529                 if (i915_gem_request_completed(request, false))
2530                         continue;
2531
2532                 return request;
2533         }
2534
2535         return NULL;
2536 }
2537
2538 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2539                                        struct intel_engine_cs *ring)
2540 {
2541         struct drm_i915_gem_request *request;
2542         bool ring_hung;
2543
2544         request = i915_gem_find_active_request(ring);
2545
2546         if (request == NULL)
2547                 return;
2548
2549         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2550
2551         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2552
2553         list_for_each_entry_continue(request, &ring->request_list, list)
2554                 i915_set_reset_status(dev_priv, request->ctx, false);
2555 }
2556
2557 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2558                                         struct intel_engine_cs *ring)
2559 {
2560         while (!list_empty(&ring->active_list)) {
2561                 struct drm_i915_gem_object *obj;
2562
2563                 obj = list_first_entry(&ring->active_list,
2564                                        struct drm_i915_gem_object,
2565                                        ring_list);
2566
2567                 i915_gem_object_move_to_inactive(obj);
2568         }
2569
2570         /*
2571          * Clear up the execlists queue before freeing the requests, as those
2572          * are the ones that keep the context and ringbuffer backing objects
2573          * pinned in place.
2574          */
2575         while (!list_empty(&ring->execlist_queue)) {
2576                 struct drm_i915_gem_request *submit_req;
2577
2578                 submit_req = list_first_entry(&ring->execlist_queue,
2579                                 struct drm_i915_gem_request,
2580                                 execlist_link);
2581                 list_del(&submit_req->execlist_link);
2582                 intel_runtime_pm_put(dev_priv);
2583
2584                 if (submit_req->ctx != ring->default_context)
2585                         intel_lr_context_unpin(ring, submit_req->ctx);
2586
2587                 i915_gem_request_unreference(submit_req);
2588         }
2589
2590         /*
2591          * We must free the requests after all the corresponding objects have
2592          * been moved off the active lists, which is the same order the normal
2593          * retire_requests function uses. This is important if objects hold
2594          * implicit references on things like e.g. ppgtt address spaces through
2595          * the request.
2596          */
2597         while (!list_empty(&ring->request_list)) {
2598                 struct drm_i915_gem_request *request;
2599
2600                 request = list_first_entry(&ring->request_list,
2601                                            struct drm_i915_gem_request,
2602                                            list);
2603
2604                 i915_gem_free_request(request);
2605         }
2606
2607         /* This may not have been flushed before the reset, so clean it now */
2608         i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
2609 }
2610
2611 void i915_gem_restore_fences(struct drm_device *dev)
2612 {
2613         struct drm_i915_private *dev_priv = dev->dev_private;
2614         int i;
2615
2616         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2617                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2618
2619                 /*
2620                  * Commit delayed tiling changes if we have an object still
2621                  * attached to the fence, otherwise just clear the fence.
2622                  */
2623                 if (reg->obj) {
2624                         i915_gem_object_update_fence(reg->obj, reg,
2625                                                      reg->obj->tiling_mode);
2626                 } else {
2627                         i915_gem_write_fence(dev, i, NULL);
2628                 }
2629         }
2630 }
2631
2632 void i915_gem_reset(struct drm_device *dev)
2633 {
2634         struct drm_i915_private *dev_priv = dev->dev_private;
2635         struct intel_engine_cs *ring;
2636         int i;
2637
2638         /*
2639          * Before we free the objects from the requests, we need to inspect
2640          * them for finding the guilty party. As the requests only borrow
2641          * their reference to the objects, the inspection must be done first.
2642          */
2643         for_each_ring(ring, dev_priv, i)
2644                 i915_gem_reset_ring_status(dev_priv, ring);
2645
2646         for_each_ring(ring, dev_priv, i)
2647                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2648
2649         i915_gem_context_reset(dev);
2650
2651         i915_gem_restore_fences(dev);
2652 }
2653
2654 /**
2655  * This function clears the request list as sequence numbers are passed.
2656  */
2657 void
2658 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2659 {
2660         if (list_empty(&ring->request_list))
2661                 return;
2662
2663         WARN_ON(i915_verify_lists(ring->dev));
2664
2665         /* Retire requests first, as we use the request list above for the early return.
2666          * If we retire requests last, we may use a later seqno and so clear
2667          * the request lists without clearing the active list, leading to
2668          * confusion.
2669          */
2670         while (!list_empty(&ring->request_list)) {
2671                 struct drm_i915_gem_request *request;
2672
2673                 request = list_first_entry(&ring->request_list,
2674                                            struct drm_i915_gem_request,
2675                                            list);
2676
2677                 if (!i915_gem_request_completed(request, true))
2678                         break;
2679
2680                 trace_i915_gem_request_retire(request);
2681
2682                 /* We know the GPU must have read the request to have
2683                  * sent us the seqno + interrupt, so use the position
2684                  * of tail of the request to update the last known position
2685                  * of the GPU head.
2686                  */
2687                 request->ringbuf->last_retired_head = request->postfix;
2688
2689                 i915_gem_free_request(request);
2690         }
2691
2692         /* Move any buffers on the active list that are no longer referenced
2693          * by the ringbuffer to the flushing/inactive lists as appropriate,
2694          * before we free the context associated with the requests.
2695          */
2696         while (!list_empty(&ring->active_list)) {
2697                 struct drm_i915_gem_object *obj;
2698
2699                 obj = list_first_entry(&ring->active_list,
2700                                       struct drm_i915_gem_object,
2701                                       ring_list);
2702
2703                 if (!i915_gem_request_completed(obj->last_read_req, true))
2704                         break;
2705
2706                 i915_gem_object_move_to_inactive(obj);
2707         }
2708
2709         if (unlikely(ring->trace_irq_req &&
2710                      i915_gem_request_completed(ring->trace_irq_req, true))) {
2711                 ring->irq_put(ring);
2712                 i915_gem_request_assign(&ring->trace_irq_req, NULL);
2713         }
2714
2715         WARN_ON(i915_verify_lists(ring->dev));
2716 }
2717
2718 bool
2719 i915_gem_retire_requests(struct drm_device *dev)
2720 {
2721         struct drm_i915_private *dev_priv = dev->dev_private;
2722         struct intel_engine_cs *ring;
2723         bool idle = true;
2724         int i;
2725
2726         for_each_ring(ring, dev_priv, i) {
2727                 i915_gem_retire_requests_ring(ring);
2728                 idle &= list_empty(&ring->request_list);
2729                 if (i915.enable_execlists) {
2730                         unsigned long flags;
2731
2732                         spin_lock_irqsave(&ring->execlist_lock, flags);
2733                         idle &= list_empty(&ring->execlist_queue);
2734                         spin_unlock_irqrestore(&ring->execlist_lock, flags);
2735
2736                         intel_execlists_retire_requests(ring);
2737                 }
2738         }
2739
2740         if (idle)
2741                 mod_delayed_work(dev_priv->wq,
2742                                    &dev_priv->mm.idle_work,
2743                                    msecs_to_jiffies(100));
2744
2745         return idle;
2746 }
2747
2748 static void
2749 i915_gem_retire_work_handler(struct work_struct *work)
2750 {
2751         struct drm_i915_private *dev_priv =
2752                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2753         struct drm_device *dev = dev_priv->dev;
2754         bool idle;
2755
2756         /* Come back later if the device is busy... */
2757         idle = false;
2758         if (mutex_trylock(&dev->struct_mutex)) {
2759                 idle = i915_gem_retire_requests(dev);
2760                 mutex_unlock(&dev->struct_mutex);
2761         }
2762         if (!idle)
2763                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2764                                    round_jiffies_up_relative(HZ));
2765 }
2766
2767 static void
2768 i915_gem_idle_work_handler(struct work_struct *work)
2769 {
2770         struct drm_i915_private *dev_priv =
2771                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2772
2773         intel_mark_idle(dev_priv->dev);
2774 }
2775
2776 /**
2777  * Ensures that an object will eventually get non-busy by flushing any required
2778  * write domains, emitting any outstanding lazy request and retiring any
2779  * completed requests.
2780  */
2781 static int
2782 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2783 {
2784         struct intel_engine_cs *ring;
2785         int ret;
2786
2787         if (obj->active) {
2788                 ring = i915_gem_request_get_ring(obj->last_read_req);
2789
2790                 ret = i915_gem_check_olr(obj->last_read_req);
2791                 if (ret)
2792                         return ret;
2793
2794                 i915_gem_retire_requests_ring(ring);
2795         }
2796
2797         return 0;
2798 }
2799
2800 /**
2801  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2802  * @DRM_IOCTL_ARGS: standard ioctl arguments
2803  *
2804  * Returns 0 if successful, else an error is returned with the remaining time in
2805  * the timeout parameter.
2806  *  -ETIME: object is still busy after timeout
2807  *  -ERESTARTSYS: signal interrupted the wait
2808  *  -ENOENT: object doesn't exist
2809  * Also possible, but rare:
2810  *  -EAGAIN: GPU wedged
2811  *  -ENOMEM: damn
2812  *  -ENODEV: Internal IRQ fail
2813  *  -E?: The add request failed
2814  *
2815  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2816  * non-zero timeout parameter the wait ioctl will wait for the given number of
2817  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2818  * without holding struct_mutex, the object may become re-busied before this
2819  * function completes. A similar but shorter race condition exists in the busy
2820  * ioctl.
2821  */
2822 int
2823 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2824 {
2825         struct drm_i915_private *dev_priv = dev->dev_private;
2826         struct drm_i915_gem_wait *args = data;
2827         struct drm_i915_gem_object *obj;
2828         struct drm_i915_gem_request *req;
2829         unsigned reset_counter;
2830         int ret = 0;
2831
2832         if (args->flags != 0)
2833                 return -EINVAL;
2834
2835         ret = i915_mutex_lock_interruptible(dev);
2836         if (ret)
2837                 return ret;
2838
2839         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2840         if (&obj->base == NULL) {
2841                 mutex_unlock(&dev->struct_mutex);
2842                 return -ENOENT;
2843         }
2844
2845         /* Need to make sure the object gets inactive eventually. */
2846         ret = i915_gem_object_flush_active(obj);
2847         if (ret)
2848                 goto out;
2849
2850         if (!obj->active || !obj->last_read_req)
2851                 goto out;
2852
2853         req = obj->last_read_req;
2854
2855         /* Do this after the OLR check to make sure we make forward progress polling
2856          * on this IOCTL with a timeout of 0 (like the busy ioctl).
2857          */
2858         if (args->timeout_ns == 0) {
2859                 ret = -ETIME;
2860                 goto out;
2861         }
2862
2863         drm_gem_object_unreference(&obj->base);
2864         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2865         i915_gem_request_reference(req);
2866         mutex_unlock(&dev->struct_mutex);
2867
2868         ret = __i915_wait_request(req, reset_counter, true,
2869                                   args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2870                                   file->driver_priv);
2871         mutex_lock(&dev->struct_mutex);
2872         i915_gem_request_unreference(req);
2873         mutex_unlock(&dev->struct_mutex);
2874         return ret;
2875
2876 out:
2877         drm_gem_object_unreference(&obj->base);
2878         mutex_unlock(&dev->struct_mutex);
2879         return ret;
2880 }
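
/*
 * Illustrative userspace sketch of the wait ioctl documented above (not part
 * of the driver). It assumes an open DRM fd and a GEM handle; a zero
 * timeout_ns turns the call into a busy poll that fails with ETIME while the
 * object is still busy, while a positive value (here one second) waits up to
 * that many nanoseconds, with the remaining time written back to timeout_ns.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */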
2881
2882 /**
2883  * i915_gem_object_sync - sync an object to a ring.
2884  *
2885  * @obj: object which may be in use on another ring.
2886  * @to: ring we wish to use the object on. May be NULL.
2887  *
2888  * This code is meant to abstract object synchronization with the GPU.
2889  * Calling with NULL implies synchronizing the object with the CPU
2890  * rather than a particular GPU ring.
2891  *
2892  * Returns 0 if successful, else propagates up the lower layer error.
2893  */
2894 int
2895 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2896                      struct intel_engine_cs *to)
2897 {
2898         struct intel_engine_cs *from;
2899         u32 seqno;
2900         int ret, idx;
2901
2902         from = i915_gem_request_get_ring(obj->last_read_req);
2903
2904         if (from == NULL || to == from)
2905                 return 0;
2906
2907         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2908                 return i915_gem_object_wait_rendering(obj, false);
2909
2910         idx = intel_ring_sync_index(from, to);
2911
2912         seqno = i915_gem_request_get_seqno(obj->last_read_req);
2913         /* Optimization: Avoid semaphore sync when we are sure we already
2914          * waited for an object with a higher seqno */
2915         if (seqno <= from->semaphore.sync_seqno[idx])
2916                 return 0;
2917
2918         ret = i915_gem_check_olr(obj->last_read_req);
2919         if (ret)
2920                 return ret;
2921
2922         trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
2923         ret = to->semaphore.sync_to(to, from, seqno);
2924         if (!ret)
2925                 /* We use last_read_req because sync_to()
2926                  * might have just caused seqno wrap under
2927                  * the radar.
2928                  */
2929                 from->semaphore.sync_seqno[idx] =
2930                                 i915_gem_request_get_seqno(obj->last_read_req);
2931
2932         return ret;
2933 }
2934
2935 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2936 {
2937         u32 old_write_domain, old_read_domains;
2938
2939         /* Force a pagefault for domain tracking on next user access */
2940         i915_gem_release_mmap(obj);
2941
2942         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2943                 return;
2944
2945         /* Wait for any direct GTT access to complete */
2946         mb();
2947
2948         old_read_domains = obj->base.read_domains;
2949         old_write_domain = obj->base.write_domain;
2950
2951         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2952         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2953
2954         trace_i915_gem_object_change_domain(obj,
2955                                             old_read_domains,
2956                                             old_write_domain);
2957 }
2958
2959 int i915_vma_unbind(struct i915_vma *vma)
2960 {
2961         struct drm_i915_gem_object *obj = vma->obj;
2962         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2963         int ret;
2964
2965         if (list_empty(&vma->vma_link))
2966                 return 0;
2967
2968         if (!drm_mm_node_allocated(&vma->node)) {
2969                 i915_gem_vma_destroy(vma);
2970                 return 0;
2971         }
2972
2973         if (vma->pin_count)
2974                 return -EBUSY;
2975
2976         BUG_ON(obj->pages == NULL);
2977
2978         ret = i915_gem_object_finish_gpu(obj);
2979         if (ret)
2980                 return ret;
2981         /* Continue on if we fail due to EIO: the GPU is hung, so we
2982          * should be safe, and we need to clean up or else we might
2983          * cause memory corruption through use-after-free.
2984          */
2985
2986         if (i915_is_ggtt(vma->vm) &&
2987             vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2988                 i915_gem_object_finish_gtt(obj);
2989
2990                 /* release the fence reg _after_ flushing */
2991                 ret = i915_gem_object_put_fence(obj);
2992                 if (ret)
2993                         return ret;
2994         }
2995
2996         trace_i915_vma_unbind(vma);
2997
2998         vma->unbind_vma(vma);
2999
3000         list_del_init(&vma->mm_list);
3001         if (i915_is_ggtt(vma->vm)) {
3002                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3003                         obj->map_and_fenceable = false;
3004                 } else if (vma->ggtt_view.pages) {
3005                         sg_free_table(vma->ggtt_view.pages);
3006                         kfree(vma->ggtt_view.pages);
3007                 }
3008                 vma->ggtt_view.pages = NULL;
3009         }
3010
3011         drm_mm_remove_node(&vma->node);
3012         i915_gem_vma_destroy(vma);
3013
3014         /* Since the unbound list is global, only move to that list if
3015          * no more VMAs exist. */
3016         if (list_empty(&obj->vma_list)) {
3017                 /* Throw away the active reference before
3018                  * moving to the unbound list. */
3019                 i915_gem_object_retire(obj);
3020
3021                 i915_gem_gtt_finish_object(obj);
3022                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3023         }
3024
3025         /* And finally now the object is completely decoupled from this vma,
3026          * we can drop its hold on the backing storage and allow it to be
3027          * reaped by the shrinker.
3028          */
3029         i915_gem_object_unpin_pages(obj);
3030
3031         return 0;
3032 }
3033
3034 int i915_gpu_idle(struct drm_device *dev)
3035 {
3036         struct drm_i915_private *dev_priv = dev->dev_private;
3037         struct intel_engine_cs *ring;
3038         int ret, i;
3039
3040         /* Flush everything onto the inactive list. */
3041         for_each_ring(ring, dev_priv, i) {
3042                 if (!i915.enable_execlists) {
3043                         ret = i915_switch_context(ring, ring->default_context);
3044                         if (ret)
3045                                 return ret;
3046                 }
3047
3048                 ret = intel_ring_idle(ring);
3049                 if (ret)
3050                         return ret;
3051         }
3052
3053         return 0;
3054 }
3055
3056 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3057                                  struct drm_i915_gem_object *obj)
3058 {
3059         struct drm_i915_private *dev_priv = dev->dev_private;
3060         int fence_reg;
3061         int fence_pitch_shift;
3062
3063         if (INTEL_INFO(dev)->gen >= 6) {
3064                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3065                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3066         } else {
3067                 fence_reg = FENCE_REG_965_0;
3068                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3069         }
3070
3071         fence_reg += reg * 8;
3072
3073         /* To work around incoherency with non-atomic 64-bit register updates,
3074          * we split the 64-bit update into two 32-bit writes. In order
3075          * for a partial fence not to be evaluated between writes, we
3076          * precede the update with write to turn off the fence register,
3077          * and only enable the fence as the last step.
3078          *
3079          * For extra levels of paranoia, we make sure each step lands
3080          * before applying the next step.
3081          */
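        /* Illustrative example (hypothetical addresses): when moving a fence
         * from [0x00000000, 1M) to [0x10000000, 2M), updating only one dword
         * of the 64-bit register first could leave a transient fence covering
         * an unintended range while still marked valid.  Hence the sequence
         * below: clear the register, post it, write the high dword, post it,
         * then write the low dword (which carries I965_FENCE_REG_VALID) last.
         */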
3082         I915_WRITE(fence_reg, 0);
3083         POSTING_READ(fence_reg);
3084
3085         if (obj) {
3086                 u32 size = i915_gem_obj_ggtt_size(obj);
3087                 uint64_t val;
3088
3089                 /* Adjust fence size to match tiled area */
3090                 if (obj->tiling_mode != I915_TILING_NONE) {
3091                         uint32_t row_size = obj->stride *
3092                                 (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
3093                         size = (size / row_size) * row_size;
3094                 }
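                /* For illustration (hypothetical numbers): with Y tiling,
                 * 32 rows per tile and a 4096-byte stride, row_size =
                 * 4096 * 32 = 131072 bytes; a 1052672-byte GGTT size would be
                 * rounded down to 8 * 131072 = 1048576 bytes so the fence
                 * covers only whole tile rows.
                 */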
3095
3096                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3097                                  0xfffff000) << 32;
3098                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3099                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3100                 if (obj->tiling_mode == I915_TILING_Y)
3101                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3102                 val |= I965_FENCE_REG_VALID;
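                /* The assembled value packs, from the fields above: the
                 * 4K-aligned end address in the upper dword, the 4K-aligned
                 * start address in the lower dword, the pitch in 128-byte
                 * units minus one at fence_pitch_shift, the Y-tiling flag,
                 * and the valid bit.
                 */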
3103
3104                 I915_WRITE(fence_reg + 4, val >> 32);
3105                 POSTING_READ(fence_reg + 4);
3106
3107                 I915_WRITE(fence_reg + 0, val);
3108                 POSTING_READ(fence_reg);
3109         } else {
3110                 I915_WRITE(fence_reg + 4, 0);
3111                 POSTING_READ(fence_reg + 4);
3112         }
3113 }
3114
3115 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3116                                  struct drm_i915_gem_object *obj)
3117 {
3118         struct drm_i915_private *dev_priv = dev->dev_private;
3119         u32 val;
3120
3121         if (obj) {
3122                 u32 size = i915_gem_obj_ggtt_size(obj);
3123                 int pitch_val;
3124                 int tile_width;
3125
3126                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3127                      (size & -size) != size ||
3128                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3129                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3130                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3131
3132                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3133                         tile_width = 128;
3134                 else
3135                         tile_width = 512;
3136
3137                 /* Note: the pitch must be a power-of-two number of tile widths */
3138                 pitch_val = obj->stride / tile_width;
3139                 pitch_val = ffs(pitch_val) - 1;
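                /* Worked example (hypothetical stride): stride = 2048 and
                 * tile_width = 512 give pitch_val = 4, and ffs(4) - 1 = 2,
                 * i.e. log2 of the pitch expressed in tile widths.
                 */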
3140
3141                 val = i915_gem_obj_ggtt_offset(obj);
3142                 if (obj->tiling_mode == I915_TILING_Y)
3143                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3144                 val |= I915_FENCE_SIZE_BITS(size);
3145                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3146                 val |= I830_FENCE_REG_VALID;
3147         } else
3148                 val = 0;
3149
3150         if (reg < 8)
3151                 reg = FENCE_REG_830_0 + reg * 4;
3152         else
3153                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3154
3155         I915_WRITE(reg, val);
3156         POSTING_READ(reg);
3157 }
3158
3159 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3160                                 struct drm_i915_gem_object *obj)
3161 {
3162         struct drm_i915_private *dev_priv = dev->dev_private;
3163         uint32_t val;
3164
3165         if (obj) {
3166                 u32 size = i915_gem_obj_ggtt_size(obj);
3167                 uint32_t pitch_val;
3168
3169                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3170                      (size & -size) != size ||
3171                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3172                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3173                      i915_gem_obj_ggtt_offset(obj), size);
3174
3175                 pitch_val = obj->stride / 128;
3176                 pitch_val = ffs(pitch_val) - 1;
3177
3178                 val = i915_gem_obj_ggtt_offset(obj);
3179                 if (obj->tiling_mode == I915_TILING_Y)
3180                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3181                 val |= I830_FENCE_SIZE_BITS(size);
3182                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3183                 val |= I830_FENCE_REG_VALID;
3184         } else
3185                 val = 0;
3186
3187         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3188         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3189 }
3190
3191 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3192 {
3193         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3194 }
3195
3196 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3197                                  struct drm_i915_gem_object *obj)
3198 {
3199         struct drm_i915_private *dev_priv = dev->dev_private;
3200
3201         /* Ensure that all CPU reads are completed before installing a fence
3202          * and all writes before removing the fence.
3203          */
3204         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3205                 mb();
3206
3207         WARN(obj && (!obj->stride || !obj->tiling_mode),
3208              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3209              obj->stride, obj->tiling_mode);
3210
3211         if (IS_GEN2(dev))
3212                 i830_write_fence_reg(dev, reg, obj);
3213         else if (IS_GEN3(dev))
3214                 i915_write_fence_reg(dev, reg, obj);
3215         else if (INTEL_INFO(dev)->gen >= 4)
3216                 i965_write_fence_reg(dev, reg, obj);
3217
3218         /* And similarly be paranoid that no direct access to this region
3219          * is reordered to before the fence is installed.
3220          */
3221         if (i915_gem_object_needs_mb(obj))
3222                 mb();
3223 }
3224
3225 static inline int fence_number(struct drm_i915_private *dev_priv,
3226                                struct drm_i915_fence_reg *fence)
3227 {
3228         return fence - dev_priv->fence_regs;
3229 }
3230
3231 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3232                                          struct drm_i915_fence_reg *fence,
3233                                          bool enable)
3234 {
3235         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3236         int reg = fence_number(dev_priv, fence);
3237
3238         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3239
3240         if (enable) {
3241                 obj->fence_reg = reg;
3242                 fence->obj = obj;
3243                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3244         } else {
3245                 obj->fence_reg = I915_FENCE_REG_NONE;
3246                 fence->obj = NULL;
3247                 list_del_init(&fence->lru_list);
3248         }
3249         obj->fence_dirty = false;
3250 }
3251
3252 static int
3253 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3254 {
3255         if (obj->last_fenced_req) {
3256                 int ret = i915_wait_request(obj->last_fenced_req);
3257                 if (ret)
3258                         return ret;
3259
3260                 i915_gem_request_assign(&obj->last_fenced_req, NULL);
3261         }
3262
3263         return 0;
3264 }
3265
3266 int
3267 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3268 {
3269         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3270         struct drm_i915_fence_reg *fence;
3271         int ret;
3272
3273         ret = i915_gem_object_wait_fence(obj);
3274         if (ret)
3275                 return ret;
3276
3277         if (obj->fence_reg == I915_FENCE_REG_NONE)
3278                 return 0;
3279
3280         fence = &dev_priv->fence_regs[obj->fence_reg];
3281
3282         if (WARN_ON(fence->pin_count))
3283                 return -EBUSY;
3284
3285         i915_gem_object_fence_lost(obj);
3286         i915_gem_object_update_fence(obj, fence, false);
3287
3288         return 0;
3289 }
3290
3291 static struct drm_i915_fence_reg *
3292 i915_find_fence_reg(struct drm_device *dev)
3293 {
3294         struct drm_i915_private *dev_priv = dev->dev_private;
3295         struct drm_i915_fence_reg *reg, *avail;
3296         int i;
3297
3298         /* First try to find a free reg */
3299         avail = NULL;
3300         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3301                 reg = &dev_priv->fence_regs[i];
3302                 if (!reg->obj)
3303                         return reg;
3304
3305                 if (!reg->pin_count)
3306                         avail = reg;
3307         }
3308
3309         if (avail == NULL)
3310                 goto deadlock;
3311
3312         /* None available, try to steal one or wait for a user to finish */
3313         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3314                 if (reg->pin_count)
3315                         continue;
3316
3317                 return reg;
3318         }
3319
3320 deadlock:
3321         /* Wait for completion of pending flips which consume fences */
3322         if (intel_has_pending_fb_unpin(dev))
3323                 return ERR_PTR(-EAGAIN);
3324
3325         return ERR_PTR(-EDEADLK);
3326 }
3327
3328 /**
3329  * i915_gem_object_get_fence - set up fencing for an object
3330  * @obj: object to map through a fence reg
3331  *
3332  * When mapping objects through the GTT, userspace wants to be able to write
3333  * to them without having to worry about swizzling if the object is tiled.
3334  * This function walks the fence regs looking for a free one for @obj,
3335  * stealing one if it can't find any.
3336  *
3337  * It then sets up the reg based on the object's properties: address, pitch
3338  * and tiling format.
3339  *
3340  * For an untiled surface, this removes any existing fence.
3341  */
3342 int
3343 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3344 {
3345         struct drm_device *dev = obj->base.dev;
3346         struct drm_i915_private *dev_priv = dev->dev_private;
3347         bool enable = obj->tiling_mode != I915_TILING_NONE;
3348         struct drm_i915_fence_reg *reg;
3349         int ret;
3350
3351         /* Have we updated the tiling parameters upon the object and so
3352          * will need to serialise the write to the associated fence register?
3353          */
3354         if (obj->fence_dirty) {
3355                 ret = i915_gem_object_wait_fence(obj);
3356                 if (ret)
3357                         return ret;
3358         }
3359
3360         /* Just update our place in the LRU if our fence is getting reused. */
3361         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3362                 reg = &dev_priv->fence_regs[obj->fence_reg];
3363                 if (!obj->fence_dirty) {
3364                         list_move_tail(&reg->lru_list,
3365                                        &dev_priv->mm.fence_list);
3366                         return 0;
3367                 }
3368         } else if (enable) {
3369                 if (WARN_ON(!obj->map_and_fenceable))
3370                         return -EINVAL;
3371
3372                 reg = i915_find_fence_reg(dev);
3373                 if (IS_ERR(reg))
3374                         return PTR_ERR(reg);
3375
3376                 if (reg->obj) {
3377                         struct drm_i915_gem_object *old = reg->obj;
3378
3379                         ret = i915_gem_object_wait_fence(old);
3380                         if (ret)
3381                                 return ret;
3382
3383                         i915_gem_object_fence_lost(old);
3384                 }
3385         } else
3386                 return 0;
3387
3388         i915_gem_object_update_fence(obj, reg, enable);
3389
3390         return 0;
3391 }
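/* A rough sketch of a typical caller sequence for the function above (error
 * handling omitted; the exact call sites elsewhere in the driver vary):
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
 *				       0, PIN_MAPPABLE);
 *	ret = i915_gem_object_get_fence(obj);
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... perform tiled access through the GTT aperture ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 *	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 */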
3392
3393 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3394                                      unsigned long cache_level)
3395 {
3396         struct drm_mm_node *gtt_space = &vma->node;
3397         struct drm_mm_node *other;
3398
3399         /*
3400          * On some machines we have to be careful when putting differing types
3401          * of snoopable memory together to avoid the prefetcher crossing memory
3402          * domains and dying. During vm initialisation, we decide whether or not
3403          * these constraints apply and set the drm_mm.color_adjust
3404          * appropriately.
3405          */
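        /* For example, an uncached node placed directly after an LLC-cached
         * node with no hole between them would be rejected by the checks
         * below, since the prefetcher could then cross from one cacheability
         * domain into the other.
         */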
3406         if (vma->vm->mm.color_adjust == NULL)
3407                 return true;
3408
3409         if (!drm_mm_node_allocated(gtt_space))
3410                 return true;
3411
3412         if (list_empty(&gtt_space->node_list))
3413                 return true;
3414
3415         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3416         if (other->allocated && !other->hole_follows && other->color != cache_level)
3417                 return false;
3418
3419         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3420         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3421                 return false;
3422
3423         return true;
3424 }
3425
3426 /**
3427  * Finds free space in the GTT aperture and binds the object there.
3428  */
3429 static struct i915_vma *
3430 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3431                            struct i915_address_space *vm,
3432                            const struct i915_ggtt_view *ggtt_view,
3433                            unsigned alignment,
3434                            uint64_t flags)
3435 {
3436         struct drm_device *dev = obj->base.dev;
3437         struct drm_i915_private *dev_priv = dev->dev_private;
3438         u32 size, fence_size, fence_alignment, unfenced_alignment;
3439         unsigned long start =
3440                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3441         unsigned long end =
3442                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3443         struct i915_vma *vma;
3444         int ret;
3445
3446         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3447                 return ERR_PTR(-EINVAL);
3448
3449         fence_size = i915_gem_get_gtt_size(dev,
3450                                            obj->base.size,
3451                                            obj->tiling_mode);
3452         fence_alignment = i915_gem_get_gtt_alignment(dev,
3453                                                      obj->base.size,
3454                                                      obj->tiling_mode, true);
3455         unfenced_alignment =
3456                 i915_gem_get_gtt_alignment(dev,
3457                                            obj->base.size,
3458                                            obj->tiling_mode, false);
3459
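        /* Roughly speaking, the fenced size/alignment reflect what the
         * detiling fence hardware would require for this object, while the
         * unfenced alignment only has to satisfy the plain GTT constraints;
         * PIN_MAPPABLE requests use the stricter fenced values since such a
         * binding may later be fenced.
         */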
3460         if (alignment == 0)
3461                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3462                                                 unfenced_alignment;
3463         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3464                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3465                 return ERR_PTR(-EINVAL);
3466         }
3467
3468         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3469
3470         /* If the object is bigger than the entire aperture, reject it early
3471          * before evicting everything in a vain attempt to find space.
3472          */
3473         if (obj->base.size > end) {
3474                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3475                           obj->base.size,
3476                           flags & PIN_MAPPABLE ? "mappable" : "total",
3477                           end);
3478                 return ERR_PTR(-E2BIG);
3479         }
3480
3481         ret = i915_gem_object_get_pages(obj);
3482         if (ret)
3483                 return ERR_PTR(ret);
3484
3485         i915_gem_object_pin_pages(obj);
3486
3487         vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3488                           i915_gem_obj_lookup_or_create_vma(obj, vm);
3489
3490         if (IS_ERR(vma))
3491                 goto err_unpin;
3492
3493 search_free:
3494         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3495                                                   size, alignment,
3496                                                   obj->cache_level,
3497                                                   start, end,
3498                                                   DRM_MM_SEARCH_DEFAULT,
3499                                                   DRM_MM_CREATE_DEFAULT);
3500         if (ret) {
3501                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3502                                                obj->cache_level,
3503                                                start, end,
3504                                                flags);
3505                 if (ret == 0)
3506                         goto search_free;
3507
3508                 goto err_free_vma;
3509         }
3510         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3511                 ret = -EINVAL;
3512                 goto err_remove_node;
3513         }
3514
3515         ret = i915_gem_gtt_prepare_object(obj);
3516         if (ret)
3517                 goto err_remove_node;
3518
3519         /* Allocate the VA range before insert / bind */
3520         if (vma->vm->allocate_va_range) {
3521                 trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
3522                                 VM_TO_TRACE_NAME(vma->vm));
3523                 ret = vma->vm->allocate_va_range(vma->vm,
3524                                                 vma->node.start,
3525                                                 vma->node.size);
3526                 if (ret)
3527                         goto err_remove_node;
3528         }
3529
3530         trace_i915_vma_bind(vma, flags);
3531         ret = i915_vma_bind(vma, obj->cache_level,
3532                             flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3533         if (ret)
3534                 goto err_finish_gtt;
3535
3536         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3537         list_add_tail(&vma->mm_list, &vm->inactive_list);
3538
3539         return vma;
3540
3541 err_finish_gtt:
3542         i915_gem_gtt_finish_object(obj);
3543 err_remove_node:
3544         drm_mm_remove_node(&vma->node);
3545 err_free_vma:
3546         i915_gem_vma_destroy(vma);
3547         vma = ERR_PTR(ret);
3548 err_unpin:
3549         i915_gem_object_unpin_pages(obj);
3550         return vma;
3551 }
3552
3553 bool
3554 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3555                         bool force)
3556 {
3557         /* If we don't have a page list set up, then we're not pinned
3558          * to GPU, and we can ignore the cache flush because it'll happen
3559          * again at bind time.
3560          */
3561         if (obj->pages == NULL)
3562                 return false;
3563
3564         /*
3565          * Stolen memory is always coherent with the GPU as it is explicitly
3566          * marked as wc by the system, or the system is cache-coherent.
3567          */
3568         if (obj->stolen || obj->phys_handle)
3569                 return false;
3570
3571         /* If the GPU is snooping the contents of the CPU cache,
3572          * we do not need to manually clear the CPU cache lines.  However,
3573          * the caches are only snooped when the render cache is
3574          * flushed/invalidated.  As we always have to emit invalidations
3575          * and flushes when moving into and out of the RENDER domain, correct
3576          * snooping behaviour occurs naturally as the result of our domain
3577          * tracking.
3578          */
3579         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3580                 obj->cache_dirty = true;
3581                 return false;
3582         }
3583
3584         trace_i915_gem_object_clflush(obj);
3585         drm_clflush_sg(obj->pages);
3586         obj->cache_dirty = false;
3587
3588         return true;
3589 }
3590
3591 /** Flushes the GTT write domain for the object if it's dirty. */
3592 static void
3593 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3594 {
3595         uint32_t old_write_domain;
3596
3597         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3598                 return;
3599
3600         /* No actual flushing is required for the GTT write domain.  Writes
3601          * to it immediately go to main memory as far as we know, so there's
3602          * no chipset flush.  It also doesn't land in render cache.
3603          *
3604          * However, we do have to enforce the order so that all writes through
3605          * the GTT land before any writes to the device, such as updates to
3606          * the GATT itself.
3607          */
3608         wmb();
3609
3610         old_write_domain = obj->base.write_domain;
3611         obj->base.write_domain = 0;
3612
3613         intel_fb_obj_flush(obj, false);
3614
3615         trace_i915_gem_object_change_domain(obj,
3616                                             obj->base.read_domains,
3617                                             old_write_domain);
3618 }
3619
3620 /** Flushes the CPU write domain for the object if it's dirty. */
3621 static void
3622 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3623 {
3624         uint32_t old_write_domain;
3625
3626         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3627                 return;
3628
3629         if (i915_gem_clflush_object(obj, obj->pin_display))
3630                 i915_gem_chipset_flush(obj->base.dev);
3631
3632         old_write_domain = obj->base.write_domain;
3633         obj->base.write_domain = 0;
3634
3635         intel_fb_obj_flush(obj, false);
3636
3637         trace_i915_gem_object_change_domain(obj,
3638                                             obj->base.read_domains,
3639                                             old_write_domain);
3640 }
3641
3642 /**
3643  * Moves a single object to the GTT read, and possibly write domain.
3644  *
3645  * This function returns when the move is complete, including waiting on
3646  * flushes to occur.
3647  */
3648 int
3649 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3650 {
3651         uint32_t old_write_domain, old_read_domains;
3652         struct i915_vma *vma;
3653         int ret;
3654
3655         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3656                 return 0;
3657
3658         ret = i915_gem_object_wait_rendering(obj, !write);
3659         if (ret)
3660                 return ret;
3661
3662         i915_gem_object_retire(obj);
3663
3664         /* Flush and acquire obj->pages so that we are coherent through
3665          * direct access in memory with previous cached writes through
3666          * shmemfs and that our cache domain tracking remains valid.
3667          * For example, if the obj->filp was moved to swap without us
3668          * being notified and releasing the pages, we would mistakenly
3669          * continue to assume that the obj remained out of the CPU cached
3670          * domain.
3671          */
3672         ret = i915_gem_object_get_pages(obj);
3673         if (ret)
3674                 return ret;
3675
3676         i915_gem_object_flush_cpu_write_domain(obj);
3677
3678         /* Serialise direct access to this object with the barriers for
3679          * coherent writes from the GPU, by effectively invalidating the
3680          * GTT domain upon first access.
3681          */
3682         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3683                 mb();
3684
3685         old_write_domain = obj->base.write_domain;
3686         old_read_domains = obj->base.read_domains;
3687
3688         /* It should now be out of any other write domains, and we can update
3689          * the domain values for our changes.
3690          */
3691         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3692         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3693         if (write) {
3694                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3695                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3696                 obj->dirty = 1;
3697         }
3698
3699         if (write)
3700                 intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
3701
3702         trace_i915_gem_object_change_domain(obj,
3703                                             old_read_domains,
3704                                             old_write_domain);
3705
3706         /* And bump the LRU for this access */
3707         vma = i915_gem_obj_to_ggtt(obj);
3708         if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3709                 list_move_tail(&vma->mm_list,
3710                                &to_i915(obj->base.dev)->gtt.base.inactive_list);
3711
3712         return 0;
3713 }
3714
3715 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3716                                     enum i915_cache_level cache_level)
3717 {
3718         struct drm_device *dev = obj->base.dev;
3719         struct i915_vma *vma, *next;
3720         int ret;
3721
3722         if (obj->cache_level == cache_level)
3723                 return 0;
3724
3725         if (i915_gem_obj_is_pinned(obj)) {
3726                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3727                 return -EBUSY;
3728         }
3729
3730         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3731                 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3732                         ret = i915_vma_unbind(vma);
3733                         if (ret)
3734                                 return ret;
3735                 }
3736         }
3737
3738         if (i915_gem_obj_bound_any(obj)) {
3739                 ret = i915_gem_object_finish_gpu(obj);
3740                 if (ret)
3741                         return ret;
3742
3743                 i915_gem_object_finish_gtt(obj);
3744
3745                 /* Before SandyBridge, you could not use tiling or fence
3746                  * registers with snooped memory, so relinquish any fences
3747                  * currently pointing to our region in the aperture.
3748                  */
3749                 if (INTEL_INFO(dev)->gen < 6) {
3750                         ret = i915_gem_object_put_fence(obj);
3751                         if (ret)
3752                                 return ret;
3753                 }
3754
3755                 list_for_each_entry(vma, &obj->vma_list, vma_link)
3756                         if (drm_mm_node_allocated(&vma->node)) {
3757                                 ret = i915_vma_bind(vma, cache_level,
3758                                                     vma->bound & GLOBAL_BIND);
3759                                 if (ret)
3760                                         return ret;
3761                         }
3762         }
3763
3764         list_for_each_entry(vma, &obj->vma_list, vma_link)
3765                 vma->node.color = cache_level;
3766         obj->cache_level = cache_level;
3767
3768         if (obj->cache_dirty &&
3769             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3770             cpu_write_needs_clflush(obj)) {
3771                 if (i915_gem_clflush_object(obj, true))
3772                         i915_gem_chipset_flush(obj->base.dev);
3773         }
3774
3775         return 0;
3776 }
3777
3778 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3779                                struct drm_file *file)
3780 {
3781         struct drm_i915_gem_caching *args = data;
3782         struct drm_i915_gem_object *obj;
3783         int ret;
3784
3785         ret = i915_mutex_lock_interruptible(dev);
3786         if (ret)
3787                 return ret;
3788
3789         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3790         if (&obj->base == NULL) {
3791                 ret = -ENOENT;
3792                 goto unlock;
3793         }
3794
3795         switch (obj->cache_level) {
3796         case I915_CACHE_LLC:
3797         case I915_CACHE_L3_LLC:
3798                 args->caching = I915_CACHING_CACHED;
3799                 break;
3800
3801         case I915_CACHE_WT:
3802                 args->caching = I915_CACHING_DISPLAY;
3803                 break;
3804
3805         default:
3806                 args->caching = I915_CACHING_NONE;
3807                 break;
3808         }
3809
3810         drm_gem_object_unreference(&obj->base);
3811 unlock:
3812         mutex_unlock(&dev->struct_mutex);
3813         return ret;
3814 }
3815
3816 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3817                                struct drm_file *file)
3818 {
3819         struct drm_i915_gem_caching *args = data;
3820         struct drm_i915_gem_object *obj;
3821         enum i915_cache_level level;
3822         int ret;
3823
3824         switch (args->caching) {
3825         case I915_CACHING_NONE:
3826                 level = I915_CACHE_NONE;
3827                 break;
3828         case I915_CACHING_CACHED:
3829                 level = I915_CACHE_LLC;
3830                 break;
3831         case I915_CACHING_DISPLAY:
3832                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3833                 break;
3834         default:
3835                 return -EINVAL;
3836         }
3837
3838         ret = i915_mutex_lock_interruptible(dev);
3839         if (ret)
3840                 return ret;
3841
3842         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3843         if (&obj->base == NULL) {
3844                 ret = -ENOENT;
3845                 goto unlock;
3846         }
3847
3848         ret = i915_gem_object_set_cache_level(obj, level);
3849
3850         drm_gem_object_unreference(&obj->base);
3851 unlock:
3852         mutex_unlock(&dev->struct_mutex);
3853         return ret;
3854 }
3855
3856 static bool is_pin_display(struct drm_i915_gem_object *obj)
3857 {
3858         struct i915_vma *vma;
3859
3860         vma = i915_gem_obj_to_ggtt(obj);
3861         if (!vma)
3862                 return false;
3863
3864         /* There are 2 sources that pin objects:
3865          *   1. The display engine (scanouts, sprites, cursors);
3866          *   2. Reservations for execbuffer;
3867          *
3868          * We can ignore reservations as we hold the struct_mutex and
3869          * are only called outside of the reservation path.
3870          */
3871         return vma->pin_count;
3872 }
3873
3874 /*
3875  * Prepare buffer for display plane (scanout, cursors, etc).
3876  * Can be called from an uninterruptible phase (modesetting) and allows
3877  * any flushes to be pipelined (for pageflips).
3878  */
3879 int
3880 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3881                                      u32 alignment,
3882                                      struct intel_engine_cs *pipelined,
3883                                      const struct i915_ggtt_view *view)
3884 {
3885         u32 old_read_domains, old_write_domain;
3886         bool was_pin_display;
3887         int ret;
3888
3889         if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
3890                 ret = i915_gem_object_sync(obj, pipelined);
3891                 if (ret)
3892                         return ret;
3893         }
3894
3895         /* Mark the pin_display early so that we account for the
3896          * display coherency whilst setting up the cache domains.
3897          */
3898         was_pin_display = obj->pin_display;
3899         obj->pin_display = true;
3900
3901         /* The display engine is not coherent with the LLC cache on gen6.  As
3902          * a result, we make sure that the pinning that is about to occur is
3903          * done with uncached PTEs. This is the lowest common denominator for all
3904          * chipsets.
3905          *
3906          * However for gen6+, we could do better by using the GFDT bit instead
3907          * of uncaching, which would allow us to flush all the LLC-cached data
3908          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3909          */
3910         ret = i915_gem_object_set_cache_level(obj,
3911                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3912         if (ret)
3913                 goto err_unpin_display;
3914
3915         /* As the user may map the buffer once pinned in the display plane
3916          * (e.g. libkms for the bootup splash), we have to ensure that we
3917          * always use map_and_fenceable for all scanout buffers.
3918          */
3919         ret = i915_gem_object_ggtt_pin(obj, view, alignment,
3920                                        view->type == I915_GGTT_VIEW_NORMAL ?
3921                                        PIN_MAPPABLE : 0);
3922         if (ret)
3923                 goto err_unpin_display;
3924
3925         i915_gem_object_flush_cpu_write_domain(obj);
3926
3927         old_write_domain = obj->base.write_domain;
3928         old_read_domains = obj->base.read_domains;
3929
3930         /* It should now be out of any other write domains, and we can update
3931          * the domain values for our changes.
3932          */
3933         obj->base.write_domain = 0;
3934         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3935
3936         trace_i915_gem_object_change_domain(obj,
3937                                             old_read_domains,
3938                                             old_write_domain);
3939
3940         return 0;
3941
3942 err_unpin_display:
3943         WARN_ON(was_pin_display != is_pin_display(obj));
3944         obj->pin_display = was_pin_display;
3945         return ret;
3946 }
3947
3948 void
3949 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3950                                          const struct i915_ggtt_view *view)
3951 {
3952         i915_gem_object_ggtt_unpin_view(obj, view);
3953
3954         obj->pin_display = is_pin_display(obj);
3955 }
3956
3957 int
3958 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3959 {
3960         int ret;
3961
3962         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3963                 return 0;
3964
3965         ret = i915_gem_object_wait_rendering(obj, false);
3966         if (ret)
3967                 return ret;
3968
3969         /* Ensure that we invalidate the GPU's caches and TLBs. */
3970         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3971         return 0;
3972 }
3973
3974 /**
3975  * Moves a single object to the CPU read, and possibly write domain.
3976  *
3977  * This function returns when the move is complete, including waiting on
3978  * flushes to occur.
3979  */
3980 int
3981 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3982 {
3983         uint32_t old_write_domain, old_read_domains;
3984         int ret;
3985
3986         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3987                 return 0;
3988
3989         ret = i915_gem_object_wait_rendering(obj, !write);
3990         if (ret)
3991                 return ret;
3992
3993         i915_gem_object_retire(obj);
3994         i915_gem_object_flush_gtt_write_domain(obj);
3995
3996         old_write_domain = obj->base.write_domain;
3997         old_read_domains = obj->base.read_domains;
3998
3999         /* Flush the CPU cache if it's still invalid. */
4000         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4001                 i915_gem_clflush_object(obj, false);
4002
4003                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4004         }
4005
4006         /* It should now be out of any other write domains, and we can update
4007          * the domain values for our changes.
4008          */
4009         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4010
4011         /* If we're writing through the CPU, then the GPU read domains will
4012          * need to be invalidated at next use.
4013          */
4014         if (write) {
4015                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4016                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4017         }
4018
4019         if (write)
4020                 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
4021
4022         trace_i915_gem_object_change_domain(obj,
4023                                             old_read_domains,
4024                                             old_write_domain);
4025
4026         return 0;
4027 }
4028
4029 /* Throttle our rendering by waiting until the ring has completed our requests
4030  * emitted over 20 msec ago.
4031  *
4032  * Note that if we were to use the current jiffies each time around the loop,
4033  * we wouldn't escape the function with any frames outstanding if the time to
4034  * render a frame was over 20ms.
4035  *
4036  * This should get us reasonable parallelism between CPU and GPU but also
4037  * relatively low latency when blocking on a particular request to finish.
4038  */
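/* For example, if a client has requests that were emitted 40ms, 25ms, 18ms
 * and 5ms ago, the loop below picks the 25ms-old request as the target (the
 * newest one older than 20ms) and waits for it, so at most roughly 20ms of
 * that client's work remains outstanding afterwards.  (Timings illustrative.)
 */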
4039 static int
4040 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4041 {
4042         struct drm_i915_private *dev_priv = dev->dev_private;
4043         struct drm_i915_file_private *file_priv = file->driver_priv;
4044         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4045         struct drm_i915_gem_request *request, *target = NULL;
4046         unsigned reset_counter;
4047         int ret;
4048
4049         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4050         if (ret)
4051                 return ret;
4052
4053         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4054         if (ret)
4055                 return ret;
4056
4057         spin_lock(&file_priv->mm.lock);
4058         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4059                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4060                         break;
4061
4062                 target = request;
4063         }
4064         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4065         if (target)
4066                 i915_gem_request_reference(target);
4067         spin_unlock(&file_priv->mm.lock);
4068
4069         if (target == NULL)
4070                 return 0;
4071
4072         ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
4073         if (ret == 0)
4074                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4075
4076         mutex_lock(&dev->struct_mutex);
4077         i915_gem_request_unreference(target);
4078         mutex_unlock(&dev->struct_mutex);
4079
4080         return ret;
4081 }
4082
4083 static bool
4084 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4085 {
4086         struct drm_i915_gem_object *obj = vma->obj;
4087
4088         if (alignment &&
4089             vma->node.start & (alignment - 1))
4090                 return true;
4091
4092         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4093                 return true;
4094
4095         if (flags & PIN_OFFSET_BIAS &&
4096             vma->node.start < (flags & PIN_OFFSET_MASK))
4097                 return true;
4098
4099         return false;
4100 }
4101
4102 static int
4103 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4104                        struct i915_address_space *vm,
4105                        const struct i915_ggtt_view *ggtt_view,
4106                        uint32_t alignment,
4107                        uint64_t flags)
4108 {
4109         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4110         struct i915_vma *vma;
4111         unsigned bound;
4112         int ret;
4113
4114         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4115                 return -ENODEV;
4116
4117         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4118                 return -EINVAL;
4119
4120         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4121                 return -EINVAL;
4122
4123         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4124                 return -EINVAL;
4125
4126         vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4127                           i915_gem_obj_to_vma(obj, vm);
4128
4129         if (IS_ERR(vma))
4130                 return PTR_ERR(vma);
4131
4132         if (vma) {
4133                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4134                         return -EBUSY;
4135
4136                 if (i915_vma_misplaced(vma, alignment, flags)) {
4137                         unsigned long offset;
4138                         offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
4139                                              i915_gem_obj_offset(obj, vm);
4140                         WARN(vma->pin_count,
4141                              "bo is already pinned in %s with incorrect alignment:"
4142                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4143                              " obj->map_and_fenceable=%d\n",
4144                              ggtt_view ? "ggtt" : "ppgtt",
4145                              offset,
4146                              alignment,
4147                              !!(flags & PIN_MAPPABLE),
4148                              obj->map_and_fenceable);
4149                         ret = i915_vma_unbind(vma);
4150                         if (ret)
4151                                 return ret;
4152
4153                         vma = NULL;
4154                 }
4155         }
4156
4157         bound = vma ? vma->bound : 0;
4158         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4159                 /* In true PPGTT, bind has possibly changed PDEs, which
4160                  * means we must do a context switch before the GPU can
4161                  * accurately read some of the VMAs.
4162                  */
4163                 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4164                                                  flags);
4165                 if (IS_ERR(vma))
4166                         return PTR_ERR(vma);
4167         }
4168
4169         if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
4170                 ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
4171                 if (ret)
4172                         return ret;
4173         }
4174
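        /* Only when the global (GGTT) binding state changed do we need to
         * recompute map_and_fenceable: the node must be exactly fence-sized
         * and fence-aligned (fenceable), and its fence-sized span must end
         * below the mappable aperture limit (mappable).
         */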
4175         if ((bound ^ vma->bound) & GLOBAL_BIND) {
4176                 bool mappable, fenceable;
4177                 u32 fence_size, fence_alignment;
4178
4179                 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4180                                                    obj->base.size,
4181                                                    obj->tiling_mode);
4182                 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4183                                                              obj->base.size,
4184                                                              obj->tiling_mode,
4185                                                              true);
4186
4187                 fenceable = (vma->node.size == fence_size &&
4188                              (vma->node.start & (fence_alignment - 1)) == 0);
4189
4190                 mappable = (vma->node.start + fence_size <=
4191                             dev_priv->gtt.mappable_end);
4192
4193                 obj->map_and_fenceable = mappable && fenceable;
4194         }
4195
4196         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4197
4198         vma->pin_count++;
4199         if (flags & PIN_MAPPABLE)
4200                 obj->pin_mappable = true;
4201
4202         return 0;
4203 }
4204
4205 int
4206 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4207                     struct i915_address_space *vm,
4208                     uint32_t alignment,
4209                     uint64_t flags)
4210 {
4211         return i915_gem_object_do_pin(obj, vm,
4212                                       i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4213                                       alignment, flags);
4214 }
4215
4216 int
4217 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4218                          const struct i915_ggtt_view *view,
4219                          uint32_t alignment,
4220                          uint64_t flags)
4221 {
4222         if (WARN_ONCE(!view, "no view specified"))
4223                 return -EINVAL;
4224
4225         return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
4226                                       alignment, flags | PIN_GLOBAL);
4227 }
4228
4229 void
4230 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4231                                 const struct i915_ggtt_view *view)
4232 {
4233         struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4234
4235         BUG_ON(!vma);
4236         WARN_ON(vma->pin_count == 0);
4237         WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4238
4239         if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
4240                 obj->pin_mappable = false;
4241 }
4242
4243 bool
4244 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4245 {
4246         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4247                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4248                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4249
4250                 WARN_ON(!ggtt_vma ||
4251                         dev_priv->fence_regs[obj->fence_reg].pin_count >
4252                         ggtt_vma->pin_count);
4253                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4254                 return true;
4255         } else
4256                 return false;
4257 }
4258
4259 void
4260 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4261 {
4262         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4263                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4264                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4265                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4266         }
4267 }
4268
4269 int
4270 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4271                     struct drm_file *file)
4272 {
4273         struct drm_i915_gem_busy *args = data;
4274         struct drm_i915_gem_object *obj;
4275         int ret;
4276
4277         ret = i915_mutex_lock_interruptible(dev);
4278         if (ret)
4279                 return ret;
4280
4281         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4282         if (&obj->base == NULL) {
4283                 ret = -ENOENT;
4284                 goto unlock;
4285         }
4286
4287         /* Count all active objects as busy, even if they are currently not used
4288          * by the gpu. Users of this interface expect objects to eventually
4289          * become non-busy without any further actions, therefore emit any
4290          * necessary flushes here.
4291          */
4292         ret = i915_gem_object_flush_active(obj);
4293
4294         args->busy = obj->active;
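        /* The low bits report whether the object is still active; the upper
         * 16 bits carry intel_ring_flag() of the engine that last read the
         * object, which is why I915_NUM_RINGS must not exceed 16.
         */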
4295         if (obj->last_read_req) {
4296                 struct intel_engine_cs *ring;
4297                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4298                 ring = i915_gem_request_get_ring(obj->last_read_req);
4299                 args->busy |= intel_ring_flag(ring) << 16;
4300         }
4301
4302         drm_gem_object_unreference(&obj->base);
4303 unlock:
4304         mutex_unlock(&dev->struct_mutex);
4305         return ret;
4306 }
4307
4308 int
4309 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4310                         struct drm_file *file_priv)
4311 {
4312         return i915_gem_ring_throttle(dev, file_priv);
4313 }
4314
4315 int
4316 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4317                        struct drm_file *file_priv)
4318 {
4319         struct drm_i915_private *dev_priv = dev->dev_private;
4320         struct drm_i915_gem_madvise *args = data;
4321         struct drm_i915_gem_object *obj;
4322         int ret;
4323
4324         switch (args->madv) {
4325         case I915_MADV_DONTNEED:
4326         case I915_MADV_WILLNEED:
4327             break;
4328         default:
4329             return -EINVAL;
4330         }
4331
4332         ret = i915_mutex_lock_interruptible(dev);
4333         if (ret)
4334                 return ret;
4335
4336         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4337         if (&obj->base == NULL) {
4338                 ret = -ENOENT;
4339                 goto unlock;
4340         }
4341
4342         if (i915_gem_obj_is_pinned(obj)) {
4343                 ret = -EINVAL;
4344                 goto out;
4345         }
4346
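        /* On platforms with QUIRK_PIN_SWIZZLED_PAGES, swizzling depends on
         * the physical page addresses, so the pages of tiled objects are
         * kept pinned while they are marked WILLNEED; adjust that extra pin
         * here when userspace changes its advice.
         */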
4347         if (obj->pages &&
4348             obj->tiling_mode != I915_TILING_NONE &&
4349             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4350                 if (obj->madv == I915_MADV_WILLNEED)
4351                         i915_gem_object_unpin_pages(obj);
4352                 if (args->madv == I915_MADV_WILLNEED)
4353                         i915_gem_object_pin_pages(obj);
4354         }
4355
4356         if (obj->madv != __I915_MADV_PURGED)
4357                 obj->madv = args->madv;
4358
4359         /* if the object is no longer attached, discard its backing storage */
4360         if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4361                 i915_gem_object_truncate(obj);
4362
4363         args->retained = obj->madv != __I915_MADV_PURGED;
4364
4365 out:
4366         drm_gem_object_unreference(&obj->base);
4367 unlock:
4368         mutex_unlock(&dev->struct_mutex);
4369         return ret;
4370 }
4371
4372 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4373                           const struct drm_i915_gem_object_ops *ops)
4374 {
4375         INIT_LIST_HEAD(&obj->global_list);
4376         INIT_LIST_HEAD(&obj->ring_list);
4377         INIT_LIST_HEAD(&obj->obj_exec_link);
4378         INIT_LIST_HEAD(&obj->vma_list);
4379         INIT_LIST_HEAD(&obj->batch_pool_list);
4380
4381         obj->ops = ops;
4382
4383         obj->fence_reg = I915_FENCE_REG_NONE;
4384         obj->madv = I915_MADV_WILLNEED;
4385
4386         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4387 }
4388
4389 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4390         .get_pages = i915_gem_object_get_pages_gtt,
4391         .put_pages = i915_gem_object_put_pages_gtt,
4392 };
4393
4394 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4395                                                   size_t size)
4396 {
4397         struct drm_i915_gem_object *obj;
4398         struct address_space *mapping;
4399         gfp_t mask;
4400
4401         obj = i915_gem_object_alloc(dev);
4402         if (obj == NULL)
4403                 return NULL;
4404
4405         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4406                 i915_gem_object_free(obj);
4407                 return NULL;
4408         }
4409
4410         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4411         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4412                 /* 965gm cannot relocate objects above 4GiB. */
4413                 mask &= ~__GFP_HIGHMEM;
4414                 mask |= __GFP_DMA32;
4415         }
4416
4417         mapping = file_inode(obj->base.filp)->i_mapping;
4418         mapping_set_gfp_mask(mapping, mask);
4419
4420         i915_gem_object_init(obj, &i915_gem_object_ops);
4421
4422         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4423         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4424
4425         if (HAS_LLC(dev)) {
4426                 /* On some devices, we can have the GPU use the LLC (the CPU
4427                  * cache) for about a 10% performance improvement
4428                  * compared to uncached.  Graphics requests other than
4429                  * display scanout are coherent with the CPU in
4430                  * accessing this cache.  This means in this mode we
4431                  * don't need to clflush on the CPU side, and on the
4432                  * GPU side we only need to flush internal caches to
4433                  * get data visible to the CPU.
4434                  *
4435                  * However, we maintain the display planes as UC, and so
4436                  * need to rebind when first used as such.
4437                  */
4438                 obj->cache_level = I915_CACHE_LLC;
4439         } else
4440                 obj->cache_level = I915_CACHE_NONE;
4441
4442         trace_i915_gem_object_create(obj);
4443
4444         return obj;
4445 }
4446
4447 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4448 {
4449         /* If we are the last user of the backing storage (be it shmemfs
4450          * pages or stolen etc), we know that the pages are going to be
4451          * immediately released. In this case, we can then skip copying
4452          * back the contents from the GPU.
4453          */
4454
4455         if (obj->madv != I915_MADV_WILLNEED)
4456                 return false;
4457
4458         if (obj->base.filp == NULL)
4459                 return true;
4460
4461         /* At first glance, this looks racy, but then again so would be
4462          * userspace racing mmap against close. However, the first external
4463          * reference to the filp can only be obtained through the
4464          * i915_gem_mmap_ioctl() which safeguards us against the user
4465          * acquiring such a reference whilst we are in the middle of
4466          * freeing the object.
4467          */
4468         return atomic_long_read(&obj->base.filp->f_count) == 1;
4469 }
4470
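/*
 * Final free callback for a GEM object: unbind all of its VMAs, drop any
 * remaining page pins, release the backing storage and mmap offset, and
 * update the object bookkeeping.
 */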
4471 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4472 {
4473         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4474         struct drm_device *dev = obj->base.dev;
4475         struct drm_i915_private *dev_priv = dev->dev_private;
4476         struct i915_vma *vma, *next;
4477
4478         intel_runtime_pm_get(dev_priv);
4479
4480         trace_i915_gem_object_destroy(obj);
4481
4482         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4483                 int ret;
4484
4485                 vma->pin_count = 0;
4486                 ret = i915_vma_unbind(vma);
4487                 if (WARN_ON(ret == -ERESTARTSYS)) {
4488                         bool was_interruptible;
4489
4490                         was_interruptible = dev_priv->mm.interruptible;
4491                         dev_priv->mm.interruptible = false;
4492
4493                         WARN_ON(i915_vma_unbind(vma));
4494
4495                         dev_priv->mm.interruptible = was_interruptible;
4496                 }
4497         }
4498
4499         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4500          * before progressing. */
4501         if (obj->stolen)
4502                 i915_gem_object_unpin_pages(obj);
4503
4504         WARN_ON(obj->frontbuffer_bits);
4505
4506         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4507             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4508             obj->tiling_mode != I915_TILING_NONE)
4509                 i915_gem_object_unpin_pages(obj);
4510
4511         if (WARN_ON(obj->pages_pin_count))
4512                 obj->pages_pin_count = 0;
4513         if (discard_backing_storage(obj))
4514                 obj->madv = I915_MADV_DONTNEED;
4515         i915_gem_object_put_pages(obj);
4516         i915_gem_object_free_mmap_offset(obj);
4517
4518         BUG_ON(obj->pages);
4519
4520         if (obj->base.import_attach)
4521                 drm_prime_gem_destroy(&obj->base, NULL);
4522
4523         if (obj->ops->release)
4524                 obj->ops->release(obj);
4525
4526         drm_gem_object_release(&obj->base);
4527         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4528
4529         kfree(obj->bit_17);
4530         i915_gem_object_free(obj);
4531
4532         intel_runtime_pm_put(dev_priv);
4533 }
4534
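/*
 * Look up the VMA binding @obj into @vm. For the global GTT only the
 * NORMAL view is considered here; use i915_gem_obj_to_ggtt_view() to find
 * other GGTT views.
 */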
4535 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4536                                      struct i915_address_space *vm)
4537 {
4538         struct i915_vma *vma;
4539         list_for_each_entry(vma, &obj->vma_list, vma_link) {
4540                 if (i915_is_ggtt(vma->vm) &&
4541                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4542                         continue;
4543                 if (vma->vm == vm)
4544                         return vma;
4545         }
4546         return NULL;
4547 }
4548
4549 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4550                                            const struct i915_ggtt_view *view)
4551 {
4552         struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
4553         struct i915_vma *vma;
4554
4555         if (WARN_ONCE(!view, "no view specified"))
4556                 return ERR_PTR(-EINVAL);
4557
4558         list_for_each_entry(vma, &obj->vma_list, vma_link)
4559                 if (vma->vm == ggtt &&
4560                     i915_ggtt_view_equal(&vma->ggtt_view, view))
4561                         return vma;
4562         return NULL;
4563 }
4564
4565 void i915_gem_vma_destroy(struct i915_vma *vma)
4566 {
4567         struct i915_address_space *vm = NULL;
4568         WARN_ON(vma->node.allocated);
4569
4570         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4571         if (!list_empty(&vma->exec_list))
4572                 return;
4573
4574         vm = vma->vm;
4575
4576         if (!i915_is_ggtt(vm))
4577                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4578
4579         list_del(&vma->vma_link);
4580
4581         kfree(vma);
4582 }
4583
4584 static void
4585 i915_gem_stop_ringbuffers(struct drm_device *dev)
4586 {
4587         struct drm_i915_private *dev_priv = dev->dev_private;
4588         struct intel_engine_cs *ring;
4589         int i;
4590
4591         for_each_ring(ring, dev_priv, i)
4592                 dev_priv->gt.stop_ring(ring);
4593 }
4594
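/*
 * Quiesce the GPU for suspend: wait for it to idle, retire outstanding
 * requests, stop the rings and flush/cancel the housekeeping workers.
 */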
4595 int
4596 i915_gem_suspend(struct drm_device *dev)
4597 {
4598         struct drm_i915_private *dev_priv = dev->dev_private;
4599         int ret = 0;
4600
4601         mutex_lock(&dev->struct_mutex);
4602         ret = i915_gpu_idle(dev);
4603         if (ret)
4604                 goto err;
4605
4606         i915_gem_retire_requests(dev);
4607
4608         i915_gem_stop_ringbuffers(dev);
4609         mutex_unlock(&dev->struct_mutex);
4610
4611         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4612         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4613         flush_delayed_work(&dev_priv->mm.idle_work);
4614
4615         /* Assert that we successfully flushed all the work and
4616          * reset the GPU back to its idle, low power state.
4617          */
4618         WARN_ON(dev_priv->mm.busy);
4619
4620         return 0;
4621
4622 err:
4623         mutex_unlock(&dev->struct_mutex);
4624         return ret;
4625 }
4626
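/*
 * Re-emit the saved L3 remapping table for one slice, one
 * MI_LOAD_REGISTER_IMM (3 dwords) per GEN7_L3LOG register.
 */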
4627 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4628 {
4629         struct drm_device *dev = ring->dev;
4630         struct drm_i915_private *dev_priv = dev->dev_private;
4631         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4632         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4633         int i, ret;
4634
4635         if (!HAS_L3_DPF(dev) || !remap_info)
4636                 return 0;
4637
4638         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4639         if (ret)
4640                 return ret;
4641
4642         /*
4643          * Note: We do not worry about the concurrent register cacheline hang
4644          * here because no other code should access these registers other than
4645          * at initialization time.
4646          */
4647         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4648                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4649                 intel_ring_emit(ring, reg_base + i);
4650                 intel_ring_emit(ring, remap_info[i/4]);
4651         }
4652
4653         intel_ring_advance(ring);
4654
4655         return ret;
4656 }
4657
4658 void i915_gem_init_swizzling(struct drm_device *dev)
4659 {
4660         struct drm_i915_private *dev_priv = dev->dev_private;
4661
4662         if (INTEL_INFO(dev)->gen < 5 ||
4663             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4664                 return;
4665
4666         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4667                                  DISP_TILE_SURFACE_SWIZZLING);
4668
4669         if (IS_GEN5(dev))
4670                 return;
4671
4672         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4673         if (IS_GEN6(dev))
4674                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4675         else if (IS_GEN7(dev))
4676                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4677         else if (IS_GEN8(dev))
4678                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4679         else
4680                 BUG();
4681 }
4682
4683 static bool
4684 intel_enable_blt(struct drm_device *dev)
4685 {
4686         if (!HAS_BLT(dev))
4687                 return false;
4688
4689         /* The blitter was dysfunctional on early prototypes */
4690         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4691                 DRM_INFO("BLT not supported on this pre-production hardware;"
4692                          " graphics performance will be degraded.\n");
4693                 return false;
4694         }
4695
4696         return true;
4697 }
4698
4699 static void init_unused_ring(struct drm_device *dev, u32 base)
4700 {
4701         struct drm_i915_private *dev_priv = dev->dev_private;
4702
4703         I915_WRITE(RING_CTL(base), 0);
4704         I915_WRITE(RING_HEAD(base), 0);
4705         I915_WRITE(RING_TAIL(base), 0);
4706         I915_WRITE(RING_START(base), 0);
4707 }
4708
4709 static void init_unused_rings(struct drm_device *dev)
4710 {
4711         if (IS_I830(dev)) {
4712                 init_unused_ring(dev, PRB1_BASE);
4713                 init_unused_ring(dev, SRB0_BASE);
4714                 init_unused_ring(dev, SRB1_BASE);
4715                 init_unused_ring(dev, SRB2_BASE);
4716                 init_unused_ring(dev, SRB3_BASE);
4717         } else if (IS_GEN2(dev)) {
4718                 init_unused_ring(dev, SRB0_BASE);
4719                 init_unused_ring(dev, SRB1_BASE);
4720         } else if (IS_GEN3(dev)) {
4721                 init_unused_ring(dev, PRB1_BASE);
4722                 init_unused_ring(dev, PRB2_BASE);
4723         }
4724 }
4725
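/*
 * Create the legacy ringbuffers for every engine present on this platform,
 * unwinding in reverse order if any of them fails to initialise.
 */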
4726 int i915_gem_init_rings(struct drm_device *dev)
4727 {
4728         struct drm_i915_private *dev_priv = dev->dev_private;
4729         int ret;
4730
4731         ret = intel_init_render_ring_buffer(dev);
4732         if (ret)
4733                 return ret;
4734
4735         if (HAS_BSD(dev)) {
4736                 ret = intel_init_bsd_ring_buffer(dev);
4737                 if (ret)
4738                         goto cleanup_render_ring;
4739         }
4740
4741         if (intel_enable_blt(dev)) {
4742                 ret = intel_init_blt_ring_buffer(dev);
4743                 if (ret)
4744                         goto cleanup_bsd_ring;
4745         }
4746
4747         if (HAS_VEBOX(dev)) {
4748                 ret = intel_init_vebox_ring_buffer(dev);
4749                 if (ret)
4750                         goto cleanup_blt_ring;
4751         }
4752
4753         if (HAS_BSD2(dev)) {
4754                 ret = intel_init_bsd2_ring_buffer(dev);
4755                 if (ret)
4756                         goto cleanup_vebox_ring;
4757         }
4758
4759         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4760         if (ret)
4761                 goto cleanup_bsd2_ring;
4762
4763         return 0;
4764
4765 cleanup_bsd2_ring:
4766         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4767 cleanup_vebox_ring:
4768         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4769 cleanup_blt_ring:
4770         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4771 cleanup_bsd_ring:
4772         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4773 cleanup_render_ring:
4774         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4775
4776         return ret;
4777 }
4778
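/*
 * (Re)initialise the GPU hardware: program swizzling, quiesce the unused
 * rings, bring up each engine, restore the L3 remapping tables and
 * re-enable PPGTT and contexts.
 */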
4779 int
4780 i915_gem_init_hw(struct drm_device *dev)
4781 {
4782         struct drm_i915_private *dev_priv = dev->dev_private;
4783         struct intel_engine_cs *ring;
4784         int ret, i;
4785
4786         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4787                 return -EIO;
4788
4789         /* Double layer security blanket, see i915_gem_init() */
4790         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4791
4792         if (dev_priv->ellc_size)
4793                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4794
4795         if (IS_HASWELL(dev))
4796                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4797                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4798
4799         if (HAS_PCH_NOP(dev)) {
4800                 if (IS_IVYBRIDGE(dev)) {
4801                         u32 temp = I915_READ(GEN7_MSG_CTL);
4802                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4803                         I915_WRITE(GEN7_MSG_CTL, temp);
4804                 } else if (INTEL_INFO(dev)->gen >= 7) {
4805                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4806                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4807                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4808                 }
4809         }
4810
4811         i915_gem_init_swizzling(dev);
4812
4813         /*
4814          * At least 830 can leave some of the unused rings
4815          * "active" (i.e. head != tail) after resume, which
4816          * will prevent C3 entry. Make sure all unused rings
4817          * are totally idle.
4818          */
4819         init_unused_rings(dev);
4820
4821         for_each_ring(ring, dev_priv, i) {
4822                 ret = ring->init_hw(ring);
4823                 if (ret)
4824                         goto out;
4825         }
4826
4827         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4828                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4829
4830         ret = i915_ppgtt_init_hw(dev);
4831         if (ret && ret != -EIO) {
4832                 DRM_ERROR("PPGTT enable failed %d\n", ret);
4833                 i915_gem_cleanup_ringbuffer(dev);
4834         }
4835
4836         ret = i915_gem_context_enable(dev_priv);
4837         if (ret && ret != -EIO) {
4838                 DRM_ERROR("Context enable failed %d\n", ret);
4839                 i915_gem_cleanup_ringbuffer(dev);
4840
4841                 goto out;
4842         }
4843
4844 out:
4845         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4846         return ret;
4847 }
4848
4849 int i915_gem_init(struct drm_device *dev)
4850 {
4851         struct drm_i915_private *dev_priv = dev->dev_private;
4852         int ret;
4853
4854         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4855                         i915.enable_execlists);
4856
4857         mutex_lock(&dev->struct_mutex);
4858
4859         if (IS_VALLEYVIEW(dev)) {
4860                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4861                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4862                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4863                               VLV_GTLC_ALLOWWAKEACK), 10))
4864                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4865         }
4866
4867         if (!i915.enable_execlists) {
4868                 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
4869                 dev_priv->gt.init_rings = i915_gem_init_rings;
4870                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
4871                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
4872         } else {
4873                 dev_priv->gt.do_execbuf = intel_execlists_submission;
4874                 dev_priv->gt.init_rings = intel_logical_rings_init;
4875                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
4876                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4877         }
4878
4879         /* This is just a security blanket to placate dragons.
4880          * On some systems, we very sporadically observe that the first TLBs
4881          * used by the CS may be stale, despite us poking the TLB reset. If
4882          * we hold the forcewake during initialisation these problems
4883          * just magically go away.
4884          * we hold the forcewake during initialisation, these problems
4885         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4886
4887         ret = i915_gem_init_userptr(dev);
4888         if (ret)
4889                 goto out_unlock;
4890
4891         i915_gem_init_global_gtt(dev);
4892
4893         ret = i915_gem_context_init(dev);
4894         if (ret)
4895                 goto out_unlock;
4896
4897         ret = dev_priv->gt.init_rings(dev);
4898         if (ret)
4899                 goto out_unlock;
4900
4901         ret = i915_gem_init_hw(dev);
4902         if (ret == -EIO) {
4903                 /* Allow ring initialisation to fail by marking the GPU as
4904                  * wedged. But we only want to do this where the GPU is angry,
4905                  * for any other failure, such as an allocation failure, bail.
4906                  */
4907                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4908                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4909                 ret = 0;
4910         }
4911
4912 out_unlock:
4913         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4914         mutex_unlock(&dev->struct_mutex);
4915
4916         return ret;
4917 }
4918
4919 void
4920 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4921 {
4922         struct drm_i915_private *dev_priv = dev->dev_private;
4923         struct intel_engine_cs *ring;
4924         int i;
4925
4926         for_each_ring(ring, dev_priv, i)
4927                 dev_priv->gt.cleanup_ring(ring);
4928 }
4929
4930 static void
4931 init_ring_lists(struct intel_engine_cs *ring)
4932 {
4933         INIT_LIST_HEAD(&ring->active_list);
4934         INIT_LIST_HEAD(&ring->request_list);
4935 }
4936
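/*
 * Initialise a new address space and add it to the device's VM list; only
 * ppgtt address spaces have their drm_mm range manager set up here.
 */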
4937 void i915_init_vm(struct drm_i915_private *dev_priv,
4938                   struct i915_address_space *vm)
4939 {
4940         if (!i915_is_ggtt(vm))
4941                 drm_mm_init(&vm->mm, vm->start, vm->total);
4942         vm->dev = dev_priv->dev;
4943         INIT_LIST_HEAD(&vm->active_list);
4944         INIT_LIST_HEAD(&vm->inactive_list);
4945         INIT_LIST_HEAD(&vm->global_link);
4946         list_add_tail(&vm->global_link, &dev_priv->vm_list);
4947 }
4948
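/*
 * One-time GEM setup at driver load: create the object slab, initialise the
 * VM, object and fence lists, the retire/idle workers, the fence register
 * state and the shrinker.
 */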
4949 void
4950 i915_gem_load(struct drm_device *dev)
4951 {
4952         struct drm_i915_private *dev_priv = dev->dev_private;
4953         int i;
4954
4955         dev_priv->slab =
4956                 kmem_cache_create("i915_gem_object",
4957                                   sizeof(struct drm_i915_gem_object), 0,
4958                                   SLAB_HWCACHE_ALIGN,
4959                                   NULL);
4960
4961         INIT_LIST_HEAD(&dev_priv->vm_list);
4962         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4963
4964         INIT_LIST_HEAD(&dev_priv->context_list);
4965         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4966         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4967         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4968         for (i = 0; i < I915_NUM_RINGS; i++)
4969                 init_ring_lists(&dev_priv->ring[i]);
4970         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4971                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4972         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4973                           i915_gem_retire_work_handler);
4974         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4975                           i915_gem_idle_work_handler);
4976         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4977
4978         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4979
4980         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4981                 dev_priv->num_fence_regs = 32;
4982         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4983                 dev_priv->num_fence_regs = 16;
4984         else
4985                 dev_priv->num_fence_regs = 8;
4986
4987         if (intel_vgpu_active(dev))
4988                 dev_priv->num_fence_regs =
4989                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4990
4991         /* Initialize fence registers to zero */
4992         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4993         i915_gem_restore_fences(dev);
4994
4995         i915_gem_detect_bit_6_swizzle(dev);
4996         init_waitqueue_head(&dev_priv->pending_flip_queue);
4997
4998         dev_priv->mm.interruptible = true;
4999
5000         i915_gem_shrinker_init(dev_priv);
5001
5002         i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
5003
5004         mutex_init(&dev_priv->fb_tracking.lock);
5005 }
5006
5007 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5008 {
5009         struct drm_i915_file_private *file_priv = file->driver_priv;
5010
5011         cancel_delayed_work_sync(&file_priv->mm.idle_work);
5012
5013         /* Clean up our request list when the client is going away, so that
5014          * later retire_requests won't dereference our soon-to-be-gone
5015          * file_priv.
5016          */
5017         spin_lock(&file_priv->mm.lock);
5018         while (!list_empty(&file_priv->mm.request_list)) {
5019                 struct drm_i915_gem_request *request;
5020
5021                 request = list_first_entry(&file_priv->mm.request_list,
5022                                            struct drm_i915_gem_request,
5023                                            client_list);
5024                 list_del(&request->client_list);
5025                 request->file_priv = NULL;
5026         }
5027         spin_unlock(&file_priv->mm.lock);
5028 }
5029
5030 static void
5031 i915_gem_file_idle_work_handler(struct work_struct *work)
5032 {
5033         struct drm_i915_file_private *file_priv =
5034                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5035
5036         atomic_set(&file_priv->rps_wait_boost, false);
5037 }
5038
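/*
 * Per-file open hook: allocate the client's private state, its request list
 * and idle worker, and open a GEM context for it.
 */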
5039 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5040 {
5041         struct drm_i915_file_private *file_priv;
5042         int ret;
5043
5044         DRM_DEBUG_DRIVER("\n");
5045
5046         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5047         if (!file_priv)
5048                 return -ENOMEM;
5049
5050         file->driver_priv = file_priv;
5051         file_priv->dev_priv = dev->dev_private;
5052         file_priv->file = file;
5053
5054         spin_lock_init(&file_priv->mm.lock);
5055         INIT_LIST_HEAD(&file_priv->mm.request_list);
5056         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5057                           i915_gem_file_idle_work_handler);
5058
5059         ret = i915_gem_context_open(dev, file);
5060         if (ret)
5061                 kfree(file_priv);
5062
5063         return ret;
5064 }
5065
5066 /**
5067  * i915_gem_track_fb - update frontbuffer tracking
5068  * @old: current GEM buffer for the frontbuffer slots
5069  * @new: new GEM buffer for the frontbuffer slots
5070  * @frontbuffer_bits: bitmask of frontbuffer slots
5071  *
5072  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5073  * from @old and setting them in @new. Both @old and @new can be NULL.
5074  */
5075 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5076                        struct drm_i915_gem_object *new,
5077                        unsigned frontbuffer_bits)
5078 {
5079         if (old) {
5080                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5081                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5082                 old->frontbuffer_bits &= ~frontbuffer_bits;
5083         }
5084
5085         if (new) {
5086                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5087                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5088                 new->frontbuffer_bits |= frontbuffer_bits;
5089         }
5090 }
5091
5092 /* All the new VM stuff */
5093 unsigned long
5094 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5095                     struct i915_address_space *vm)
5096 {
5097         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5098         struct i915_vma *vma;
5099
5100         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5101
5102         list_for_each_entry(vma, &o->vma_list, vma_link) {
5103                 if (i915_is_ggtt(vma->vm) &&
5104                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5105                         continue;
5106                 if (vma->vm == vm)
5107                         return vma->node.start;
5108         }
5109
5110         WARN(1, "%s vma for this object not found.\n",
5111              i915_is_ggtt(vm) ? "global" : "ppgtt");
5112         return -1;
5113 }
5114
5115 unsigned long
5116 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5117                               const struct i915_ggtt_view *view)
5118 {
5119         struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5120         struct i915_vma *vma;
5121
5122         list_for_each_entry(vma, &o->vma_list, vma_link)
5123                 if (vma->vm == ggtt &&
5124                     i915_ggtt_view_equal(&vma->ggtt_view, view))
5125                         return vma->node.start;
5126
5127         WARN(1, "global vma for this object not found.\n");
5128         return -1;
5129 }
5130
5131 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5132                         struct i915_address_space *vm)
5133 {
5134         struct i915_vma *vma;
5135
5136         list_for_each_entry(vma, &o->vma_list, vma_link) {
5137                 if (i915_is_ggtt(vma->vm) &&
5138                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5139                         continue;
5140                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5141                         return true;
5142         }
5143
5144         return false;
5145 }
5146
5147 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5148                                   const struct i915_ggtt_view *view)
5149 {
5150         struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5151         struct i915_vma *vma;
5152
5153         list_for_each_entry(vma, &o->vma_list, vma_link)
5154                 if (vma->vm == ggtt &&
5155                     i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5156                     drm_mm_node_allocated(&vma->node))
5157                         return true;
5158
5159         return false;
5160 }
5161
5162 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5163 {
5164         struct i915_vma *vma;
5165
5166         list_for_each_entry(vma, &o->vma_list, vma_link)
5167                 if (drm_mm_node_allocated(&vma->node))
5168                         return true;
5169
5170         return false;
5171 }
5172
5173 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5174                                 struct i915_address_space *vm)
5175 {
5176         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5177         struct i915_vma *vma;
5178
5179         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5180
5181         BUG_ON(list_empty(&o->vma_list));
5182
5183         list_for_each_entry(vma, &o->vma_list, vma_link) {
5184                 if (i915_is_ggtt(vma->vm) &&
5185                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5186                         continue;
5187                 if (vma->vm == vm)
5188                         return vma->node.size;
5189         }
5190         return 0;
5191 }
5192
5193 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5194 {
5195         struct i915_vma *vma;
5196         list_for_each_entry(vma, &obj->vma_list, vma_link) {
5197                 if (i915_is_ggtt(vma->vm) &&
5198                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5199                         continue;
5200                 if (vma->pin_count > 0)
5201                         return true;
5202         }
5203         return false;
5204 }
5205