These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git] / kernel / drivers / gpu / drm / i915 / i915_gem_batch_pool.c
index c690170..7bf2f3f 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #include "i915_drv.h"
+#include "i915_gem_batch_pool.h"
 
 /**
  * DOC: batch pool
 void i915_gem_batch_pool_init(struct drm_device *dev,
                              struct i915_gem_batch_pool *pool)
 {
+       int n;
+
        pool->dev = dev;
-       INIT_LIST_HEAD(&pool->cache_list);
+
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+               INIT_LIST_HEAD(&pool->cache_list[n]);
 }
 
 /**
@@ -58,33 +63,35 @@ void i915_gem_batch_pool_init(struct drm_device *dev,
  */
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 {
-       WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+       int n;
 
-       while (!list_empty(&pool->cache_list)) {
-               struct drm_i915_gem_object *obj =
-                       list_first_entry(&pool->cache_list,
-                                        struct drm_i915_gem_object,
-                                        batch_pool_list);
+       WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
-               WARN_ON(obj->active);
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+               while (!list_empty(&pool->cache_list[n])) {
+                       struct drm_i915_gem_object *obj =
+                               list_first_entry(&pool->cache_list[n],
+                                                struct drm_i915_gem_object,
+                                                batch_pool_link);
 
-               list_del_init(&obj->batch_pool_list);
-               drm_gem_object_unreference(&obj->base);
+                       list_del(&obj->batch_pool_link);
+                       drm_gem_object_unreference(&obj->base);
+               }
        }
 }
 
 /**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
  * @pool: the batch buffer pool
  * @size: the minimum desired size of the returned buffer
  *
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
  *
  * Note: Callers must hold the struct_mutex
  *
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
  */
 struct drm_i915_gem_object *
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
@@ -92,46 +99,53 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 {
        struct drm_i915_gem_object *obj = NULL;
        struct drm_i915_gem_object *tmp, *next;
+       struct list_head *list;
+       int n;
 
        WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
-       list_for_each_entry_safe(tmp, next,
-                       &pool->cache_list, batch_pool_list) {
-
+       /* Compute a power-of-two bucket, but throw everything greater than
+        * 16KiB into the same bucket: i.e. the buckets hold objects of
+        * (1 page, 2 pages, 4 pages, 8+ pages).
+        */
+       n = fls(size >> PAGE_SHIFT) - 1;
+       if (n >= ARRAY_SIZE(pool->cache_list))
+               n = ARRAY_SIZE(pool->cache_list) - 1;
+       list = &pool->cache_list[n];
+
+       list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+               /* The batches are strictly LRU ordered */
                if (tmp->active)
-                       continue;
+                       break;
 
                /* While we're looping, do some clean up */
                if (tmp->madv == __I915_MADV_PURGED) {
-                       list_del(&tmp->batch_pool_list);
+                       list_del(&tmp->batch_pool_link);
                        drm_gem_object_unreference(&tmp->base);
                        continue;
                }
 
-               /*
-                * Select a buffer that is at least as big as needed
-                * but not 'too much' bigger. A better way to do this
-                * might be to bucket the pool objects based on size.
-                */
-               if (tmp->base.size >= size &&
-                   tmp->base.size <= (2 * size)) {
+               if (tmp->base.size >= size) {
                        obj = tmp;
                        break;
                }
        }
 
-       if (!obj) {
+       if (obj == NULL) {
+               int ret;
+
                obj = i915_gem_alloc_object(pool->dev, size);
-               if (!obj)
+               if (obj == NULL)
                        return ERR_PTR(-ENOMEM);
 
-               list_add_tail(&obj->batch_pool_list, &pool->cache_list);
-       }
-       else
-               /* Keep list in LRU order */
-               list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+               ret = i915_gem_object_get_pages(obj);
+               if (ret)
+                       return ERR_PTR(ret);
 
-       obj->madv = I915_MADV_WILLNEED;
+               obj->madv = I915_MADV_DONTNEED;
+       }
 
+       list_move_tail(&obj->batch_pool_link, list);
+       i915_gem_object_pin_pages(obj);
        return obj;
 }