These changes are the raw update of the kernel sources to linux-4.4.6-rt14.
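Most of the hunks below convert the legacy context-switch path from passing an explicit (engine, context) pair to passing a single drm_i915_gem_request, from which the ring and context are derived (req->ring, req->ctx). The following is a minimal stand-alone sketch of that calling-convention change only; the struct names are simplified stand-ins, not the real kernel definitions.

/*
 * Toy model of the request-based calling convention introduced by this
 * patch: the request bundles the engine and the context it will run on,
 * so the switch path no longer takes them as separate arguments.
 */
#include <stdio.h>

struct engine { const char *name; };
struct context { int id; };

/* Stand-in for drm_i915_gem_request: carries both engine and context. */
struct request {
	struct engine *ring;
	struct context *ctx;
};

/* Old style: caller passes the engine and target context explicitly. */
static int switch_context_old(struct engine *ring, struct context *to)
{
	printf("switch %s to ctx %d\n", ring->name, to->id);
	return 0;
}

/* New style: everything is derived from the request itself. */
static int switch_context_new(struct request *req)
{
	return switch_context_old(req->ring, req->ctx);
}

int main(void)
{
	struct engine rcs = { "rcs" };
	struct context ctx = { 1 };
	struct request req = { &rcs, &ctx };

	return switch_context_new(&req);
}

Bundling both objects in the request keeps the engine and context consistent for the lifetime of a submission, rather than relying on every caller to pass a matching pair.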
diff --git a/kernel/drivers/gpu/drm/i915/i915_gem_context.c b/kernel/drivers/gpu/drm/i915/i915_gem_context.c
index f3e84c4..0433d25 100644
--- a/kernel/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/kernel/drivers/gpu/drm/i915/i915_gem_context.c
@@ -133,16 +133,37 @@ static int get_context_size(struct drm_device *dev)
        return ret;
 }
 
+static void i915_gem_context_clean(struct intel_context *ctx)
+{
+       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+       struct i915_vma *vma, *next;
+
+       if (!ppgtt)
+               return;
+
+       list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
+                                mm_list) {
+               if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
+                       break;
+       }
+}
+
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-       struct intel_context *ctx = container_of(ctx_ref,
-                                                typeof(*ctx), ref);
+       struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
        trace_i915_context_free(ctx);
 
        if (i915.enable_execlists)
                intel_lr_context_free(ctx);
 
+       /*
+        * This context is going away and we need to remove all VMAs still
+        * around. This is to handle imported shared objects for which
+        * destructor did not run when their handles were closed.
+        */
+       i915_gem_context_clean(ctx);
+
        i915_ppgtt_put(ctx->ppgtt);
 
        if (ctx->legacy_hw_ctx.rcs_state)
@@ -195,6 +216,7 @@ __create_hw_context(struct drm_device *dev,
 
        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->context_list);
+       ctx->i915 = dev_priv;
 
        if (dev_priv->hw_context_size) {
                struct drm_i915_gem_object *obj =
@@ -287,6 +309,7 @@ err_unpin:
        if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
                i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
+       idr_remove(&file_priv->context_idr, ctx->user_handle);
        i915_gem_context_unreference(ctx);
        return ERR_PTR(ret);
 }
@@ -317,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
                        i915_gem_context_unreference(lctx);
                        ring->last_context = NULL;
                }
+
+               /* Force the GPU state to be reinitialised on enabling */
+               if (ring->default_context)
+                       ring->default_context->legacy_hw_ctx.initialized = false;
        }
 }
 
@@ -331,6 +358,13 @@ int i915_gem_context_init(struct drm_device *dev)
        if (WARN_ON(dev_priv->ring[RCS].default_context))
                return 0;
 
+       if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+               if (!i915.enable_execlists) {
+                       DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
+                       return -EINVAL;
+               }
+       }
+
        if (i915.enable_execlists) {
                /* NB: intentionally left blank. We will allocate our own
                 * backing objects as we need them, thank you very much */
@@ -407,32 +441,23 @@ void i915_gem_context_fini(struct drm_device *dev)
        i915_gem_context_unreference(dctx);
 }
 
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring;
-       int ret, i;
-
-       BUG_ON(!dev_priv->ring[RCS].default_context);
+       struct intel_engine_cs *ring = req->ring;
+       int ret;
 
        if (i915.enable_execlists) {
-               for_each_ring(ring, dev_priv, i) {
-                       if (ring->init_context) {
-                               ret = ring->init_context(ring,
-                                               ring->default_context);
-                               if (ret) {
-                                       DRM_ERROR("ring init context: %d\n",
-                                                       ret);
-                                       return ret;
-                               }
-                       }
-               }
+               if (ring->init_context == NULL)
+                       return 0;
 
+               ret = ring->init_context(req);
        } else
-               for_each_ring(ring, dev_priv, i) {
-                       ret = i915_switch_context(ring, ring->default_context);
-                       if (ret)
-                               return ret;
-               }
+               ret = i915_switch_context(req);
+
+       if (ret) {
+               DRM_ERROR("ring init context: %d\n", ret);
+               return ret;
+       }
 
        return 0;
 }
@@ -485,10 +510,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 }
 
 static inline int
-mi_set_context(struct intel_engine_cs *ring,
-              struct intel_context *new_context,
-              u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
@@ -503,13 +527,15 @@ mi_set_context(struct intel_engine_cs *ring,
         * itlb_before_ctx_switch.
         */
        if (IS_GEN6(ring->dev)) {
-               ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+               ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }
 
        /* These flags are for resource streamer on HSW+ */
-       if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+       if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+               flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+       else if (INTEL_INFO(ring->dev)->gen < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
@@ -517,7 +543,7 @@ mi_set_context(struct intel_engine_cs *ring,
        if (INTEL_INFO(ring->dev)->gen >= 7)
                len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
-       ret = intel_ring_begin(ring, len);
+       ret = intel_ring_begin(req, len);
        if (ret)
                return ret;
 
@@ -540,7 +566,7 @@ mi_set_context(struct intel_engine_cs *ring,
 
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
                        flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -573,20 +599,12 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
                                      struct intel_context *from,
                                      struct intel_context *to)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
        if (to->remap_slice)
                return false;
 
-       if (to->ppgtt) {
-               if (from == to && !test_bit(ring->id,
-                               &to->ppgtt->pd_dirty_rings))
-                       return true;
-       } else if (dev_priv->mm.aliasing_ppgtt) {
-               if (from == to && !test_bit(ring->id,
-                               &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
-                       return true;
-       }
+       if (to->ppgtt && from == to &&
+           !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+               return true;
 
        return false;
 }
@@ -629,14 +647,14 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
        return false;
 }
 
-static int do_switch(struct intel_engine_cs *ring,
-                    struct intel_context *to)
+static int do_switch(struct drm_i915_gem_request *req)
 {
+       struct intel_context *to = req->ctx;
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_context *from = ring->last_context;
        u32 hw_flags = 0;
        bool uninitialized = false;
-       struct i915_vma *vma;
        int ret, i;
 
        if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -668,12 +686,12 @@ static int do_switch(struct intel_engine_cs *ring,
                 * Register Immediate commands in Ring Buffer before submitting
                 * a context."*/
                trace_switch_mm(ring, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
                if (ret)
                        goto unpin_out;
 
                /* Doing a PD load always reloads the page dirs */
-               clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
+               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
        }
 
        if (ring != &dev_priv->ring[RCS]) {
@@ -694,31 +712,23 @@ static int do_switch(struct intel_engine_cs *ring,
        if (ret)
                goto unpin_out;
 
-       vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
-       if (!(vma->bound & GLOBAL_BIND)) {
-               ret = i915_vma_bind(vma,
-                                   to->legacy_hw_ctx.rcs_state->cache_level,
-                                   GLOBAL_BIND);
-               /* This shouldn't ever fail. */
-               if (WARN_ONCE(ret, "GGTT context bind failed!"))
-                       goto unpin_out;
-       }
-
-       if (!to->legacy_hw_ctx.initialized) {
+       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
                hw_flags |= MI_RESTORE_INHIBIT;
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
                 * space. This means we must enforce that a page table load
                 * occur when this occurs. */
        } else if (to->ppgtt &&
-                       test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+                  (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
                hw_flags |= MI_FORCE_RESTORE;
+               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+       }
 
        /* We should never emit switch_mm more than once */
        WARN_ON(needs_pd_load_pre(ring, to) &&
-                       needs_pd_load_post(ring, to, hw_flags));
+               needs_pd_load_post(ring, to, hw_flags));
 
-       ret = mi_set_context(ring, to, hw_flags);
+       ret = mi_set_context(req, hw_flags);
        if (ret)
                goto unpin_out;
 
@@ -727,7 +737,7 @@ static int do_switch(struct intel_engine_cs *ring,
         */
        if (needs_pd_load_post(ring, to, hw_flags)) {
                trace_switch_mm(ring, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
                /* The hardware context switch is emitted, but we haven't
                 * actually changed the state - so it's probably safe to bail
                 * here. Still, let the user know something dangerous has
@@ -743,7 +753,7 @@ static int do_switch(struct intel_engine_cs *ring,
                if (!(to->remap_slice & (1<<i)))
                        continue;
 
-               ret = i915_gem_l3_remap(ring, i);
+               ret = i915_gem_l3_remap(req, i);
                /* If it failed, try again next round */
                if (ret)
                        DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -759,7 +769,7 @@ static int do_switch(struct intel_engine_cs *ring,
         */
        if (from != NULL) {
                from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
@@ -768,8 +778,6 @@ static int do_switch(struct intel_engine_cs *ring,
                 * swapped, but there is no way to do that yet.
                 */
                from->legacy_hw_ctx.rcs_state->dirty = 1;
-               BUG_ON(i915_gem_request_get_ring(
-                       from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
 
                /* obj is kept alive until the next request by its active ref */
                i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@ -785,7 +793,7 @@ done:
 
        if (uninitialized) {
                if (ring->init_context) {
-                       ret = ring->init_context(ring, to);
+                       ret = ring->init_context(req);
                        if (ret)
                                DRM_ERROR("ring init context: %d\n", ret);
                }
@@ -801,8 +809,7 @@ unpin_out:
 
 /**
  * i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -813,25 +820,25 @@ unpin_out:
  * switched by writing to the ELSP and requests keep a reference to their
  * context.
  */
-int i915_switch_context(struct intel_engine_cs *ring,
-                       struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        WARN_ON(i915.enable_execlists);
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-               if (to != ring->last_context) {
-                       i915_gem_context_reference(to);
+       if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+               if (req->ctx != ring->last_context) {
+                       i915_gem_context_reference(req->ctx);
                        if (ring->last_context)
                                i915_gem_context_unreference(ring->last_context);
-                       ring->last_context = to;
+                       ring->last_context = req->ctx;
                }
                return 0;
        }
 
-       return do_switch(ring, to);
+       return do_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
@@ -917,6 +924,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                args->value = ctx->hang_stats.ban_period_seconds;
                break;
+       case I915_CONTEXT_PARAM_NO_ZEROMAP:
+               args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -954,6 +964,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                else
                        ctx->hang_stats.ban_period_seconds = args->value;
                break;
+       case I915_CONTEXT_PARAM_NO_ZEROMAP:
+               if (args->size) {
+                       ret = -EINVAL;
+               } else {
+                       ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+                       ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+               }
+               break;
        default:
                ret = -EINVAL;
                break;