These changes are the raw update of the kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/drivers/gpu/drm/i915/i915_dma.c b/kernel/drivers/gpu/drm/i915/i915_dma.c
index 68e0c85..61fcb3b 100644
--- a/kernel/drivers/gpu/drm/i915/i915_dma.c
+++ b/kernel/drivers/gpu/drm/i915/i915_dma.c
@@ -75,7 +75,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
-               value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+               value = dev_priv->num_fence_regs;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
@@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
                if (!value)
                        return -ENODEV;
                break;
+       case I915_PARAM_HAS_GPU_RESET:
+               value = i915.enable_hangcheck &&
+                       intel_has_gpu_reset(dev);
+               break;
+       case I915_PARAM_HAS_RESOURCE_STREAMER:
+               value = HAS_RESOURCE_STREAMER(dev);
+               break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
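
Both additions are read back through the long-standing GETPARAM ioctl. Note how
this ties into the first hunk: with i915_setparam() deleted just below,
dev_priv->fence_reg_start can never become non-zero, so
I915_PARAM_NUM_FENCES_AVAIL now reports num_fence_regs directly. A minimal
userspace probe could look like the following sketch (the include path and the
i915_query_param() helper are illustrative, not libdrm API):

        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /* Illustrative helper: value of one i915 parameter, -1 on error. */
        static int i915_query_param(int fd, int param)
        {
                int value = -1;
                drm_i915_getparam_t gp = { .param = param, .value = &value };

                if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                        return -1;
                return value;
        }

        /* i915_query_param(fd, I915_PARAM_HAS_GPU_RESET) returns 1 once
         * hangcheck is enabled and the device supports GPU reset. */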
@@ -176,35 +183,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
        return 0;
 }
 
-static int i915_setparam(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_setparam_t *param = data;
-
-       switch (param->param) {
-       case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-       case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-       case I915_SETPARAM_ALLOW_BATCHBUFFER:
-               /* Reject all old ums/dri params. */
-               return -ENODEV;
-
-       case I915_SETPARAM_NUM_USED_FENCES:
-               if (param->value > dev_priv->num_fence_regs ||
-                   param->value < 0)
-                       return -EINVAL;
-               /* Userspace can use first N regs */
-               dev_priv->fence_reg_start = param->value;
-               break;
-       default:
-               DRM_DEBUG_DRIVER("unknown parameter %d\n",
-                                       param->param);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int i915_get_bridge_dev(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -357,12 +335,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
-               i915_resume_legacy(dev);
+               i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               i915_suspend_legacy(dev, pmm);
+               i915_suspend_switcheroo(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
 }
@@ -424,10 +402,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_gem_stolen;
 
+       intel_setup_gmbus(dev);
+
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
 
+       intel_guc_ucode_init(dev);
+
        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;
@@ -469,7 +451,9 @@ cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
+       intel_guc_ucode_fini(dev);
        drm_irq_uninstall(dev);
+       intel_teardown_gmbus(dev);
 cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
 cleanup_vga_switcheroo:
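
Two ordering changes land in i915_load_modeset_init(): gmbus (the i2c
controller behind DDC and aux transfers) is now brought up here rather than in
i915_driver_load() (the old call site is removed in the -916 hunk below), and
GuC firmware loading is kicked off before GEM init. The cleanup_irq error path
unwinds in the matching reverse order. The resulting bring-up sequence,
sketched:

        intel_setup_gmbus(dev);      /* i2c must work before outputs probe   */
        intel_modeset_init(dev);     /* output setup does gmbus/DP aux xfers */
        intel_guc_ucode_init(dev);   /* fetch GuC firmware                   */
        ret = i915_gem_init(dev);    /* failure: guc fini, irq uninstall,
                                        then gmbus teardown                  */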
@@ -564,6 +548,205 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef SEP_COMMA
 }
 
+static void cherryview_sseu_info_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_device_info *info;
+       u32 fuse, eu_dis;
+
+       info = (struct intel_device_info *)&dev_priv->info;
+       fuse = I915_READ(CHV_FUSE_GT);
+
+       info->slice_total = 1;
+
+       if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+               info->subslice_per_slice++;
+               eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+                                CHV_FGT_EU_DIS_SS0_R1_MASK);
+               info->eu_total += 8 - hweight32(eu_dis);
+       }
+
+       if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+               info->subslice_per_slice++;
+               eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+                                CHV_FGT_EU_DIS_SS1_R1_MASK);
+               info->eu_total += 8 - hweight32(eu_dis);
+       }
+
+       info->subslice_total = info->subslice_per_slice;
+       /*
+        * CHV expected to always have a uniform distribution of EU
+        * across subslices.
+       */
+       info->eu_per_subslice = info->subslice_total ?
+                               info->eu_total / info->subslice_total :
+                               0;
+       /*
+        * CHV supports subslice power gating on devices with more than
+        * one subslice, and supports EU power gating on devices with
+        * more than one EU pair per subslice.
+       */
+       info->has_slice_pg = 0;
+       info->has_subslice_pg = (info->subslice_total > 1);
+       info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_device_info *info;
+       int s_max = 3, ss_max = 4, eu_max = 8;
+       int s, ss;
+       u32 fuse2, s_enable, ss_disable, eu_disable;
+       u8 eu_mask = 0xff;
+
+       info = (struct intel_device_info *)&dev_priv->info;
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+                  GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+                    GEN9_F2_SS_DIS_SHIFT;
+
+       info->slice_total = hweight32(s_enable);
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+       */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total *
+                              info->subslice_per_slice;
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+       */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & (0x1 << s)))
+                       /* skip disabled slice */
+                       continue;
+
+               eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+               for (ss = 0; ss < ss_max; ss++) {
+                       int eu_per_ss;
+
+                       if (ss_disable & (0x1 << ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+                                                     eu_mask);
+
+                       /*
+                        * Record which subslice(s) has(have) 7 EUs. we
+                        * can tune the hash used to spread work among
+                        * subslices if they are unbalanced.
+                        */
+                       if (eu_per_ss == 7)
+                               info->subslice_7eu[s] |= 1 << ss;
+
+                       info->eu_total += eu_per_ss;
+               }
+       }
+
+       /*
+        * SKL is expected to always have a uniform distribution
+        * of EU across subslices with the exception that any one
+        * EU in any one subslice may be fused off for die
+        * recovery. BXT is expected to be perfectly uniform in EU
+        * distribution.
+       */
+       info->eu_per_subslice = info->subslice_total ?
+                               DIV_ROUND_UP(info->eu_total,
+                                            info->subslice_total) : 0;
+       /*
+        * SKL supports slice power gating on devices with more than
+        * one slice, and supports EU power gating on devices with
+        * more than one EU pair per subslice. BXT supports subslice
+        * power gating on devices with more than one subslice, and
+        * supports EU power gating on devices with more than one EU
+        * pair per subslice.
+       */
+       info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+       info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
+       info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void broadwell_sseu_info_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_device_info *info;
+       const int s_max = 3, ss_max = 3, eu_max = 8;
+       int s, ss;
+       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+                        (32 - GEN8_EU_DIS0_S1_SHIFT));
+       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+                        (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+
+       info = (struct intel_device_info *)&dev_priv->info;
+       info->slice_total = hweight32(s_enable);
+
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & (0x1 << s)))
+                       /* skip disabled slice */
+                       continue;
+
+               for (ss = 0; ss < ss_max; ss++) {
+                       u32 n_disabled;
+
+                       if (ss_disable & (0x1 << ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+                       /*
+                        * Record which subslices have 7 EUs.
+                        */
+                       if (eu_max - n_disabled == 7)
+                               info->subslice_7eu[s] |= 1 << ss;
+
+                       info->eu_total += eu_max - n_disabled;
+               }
+       }
+
+       /*
+        * BDW is expected to always have a uniform distribution of EU across
+        * subslices with the exception that any one EU in any one subslice may
+        * be fused off for die recovery.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+       /*
+        * BDW supports slice power gating on devices with more than
+        * one slice.
+        */
+       info->has_slice_pg = (info->slice_total > 1);
+       info->has_subslice_pg = 0;
+       info->has_eu_pg = 0;
+}
+
 /*
  * Determine various intel_device_info fields at runtime.
  *
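
All three helpers above lean on one idiom: hweight32()/hweight8() are the
kernel's population-count helpers, so an expression like
8 - hweight32(eu_dis) turns an 8-bit EU-disable fuse field (on CHV, two 4-bit
rows per subslice) into a count of enabled EUs. A self-contained illustration
with a made-up fuse value, using GCC's __builtin_popcount() as a userspace
stand-in for the kernel helper:

        #include <stdio.h>

        /* Userspace stand-in for the kernel's hweight32(). */
        static unsigned int hweight32(unsigned int x)
        {
                return __builtin_popcount(x);
        }

        int main(void)
        {
                unsigned int eu_dis = 0x13;     /* hypothetical: 3 bits set */

                printf("enabled EUs: %u\n", 8 - hweight32(eu_dis)); /* 5 */
                return 0;
        }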
@@ -585,7 +768,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
        info = (struct intel_device_info *)&dev_priv->info;
 
-       if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+       /*
+        * Skylake and Broxton currently don't expose the topmost plane as its
+        * use is exclusive with the legacy cursor and we only want to expose
+        * one of those, not both. Until we can safely expose the topmost plane
+        * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+        * we don't expose the topmost plane at all to prevent ABI breakage
+        * down the line.
+        */
+       if (IS_BROXTON(dev)) {
+               info->num_sprites[PIPE_A] = 2;
+               info->num_sprites[PIPE_B] = 2;
+               info->num_sprites[PIPE_C] = 1;
+       } else if (IS_VALLEYVIEW(dev))
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
@@ -620,116 +815,13 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
        }
 
        /* Initialize slice/subslice/EU info */
-       if (IS_CHERRYVIEW(dev)) {
-               u32 fuse, eu_dis;
-
-               fuse = I915_READ(CHV_FUSE_GT);
-
-               info->slice_total = 1;
-
-               if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-                       info->subslice_per_slice++;
-                       eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
-                                        CHV_FGT_EU_DIS_SS0_R1_MASK);
-                       info->eu_total += 8 - hweight32(eu_dis);
-               }
-
-               if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-                       info->subslice_per_slice++;
-                       eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
-                                       CHV_FGT_EU_DIS_SS1_R1_MASK);
-                       info->eu_total += 8 - hweight32(eu_dis);
-               }
-
-               info->subslice_total = info->subslice_per_slice;
-               /*
-                * CHV expected to always have a uniform distribution of EU
-                * across subslices.
-               */
-               info->eu_per_subslice = info->subslice_total ?
-                                       info->eu_total / info->subslice_total :
-                                       0;
-               /*
-                * CHV supports subslice power gating on devices with more than
-                * one subslice, and supports EU power gating on devices with
-                * more than one EU pair per subslice.
-               */
-               info->has_slice_pg = 0;
-               info->has_subslice_pg = (info->subslice_total > 1);
-               info->has_eu_pg = (info->eu_per_subslice > 2);
-       } else if (IS_SKYLAKE(dev)) {
-               const int s_max = 3, ss_max = 4, eu_max = 8;
-               int s, ss;
-               u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
-
-               fuse2 = I915_READ(GEN8_FUSE2);
-               s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
-                          GEN8_F2_S_ENA_SHIFT;
-               ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
-                            GEN9_F2_SS_DIS_SHIFT;
-
-               eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
-               eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
-               eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
-
-               info->slice_total = hweight32(s_enable);
-               /*
-                * The subslice disable field is global, i.e. it applies
-                * to each of the enabled slices.
-               */
-               info->subslice_per_slice = ss_max - hweight32(ss_disable);
-               info->subslice_total = info->slice_total *
-                                      info->subslice_per_slice;
-
-               /*
-                * Iterate through enabled slices and subslices to
-                * count the total enabled EU.
-               */
-               for (s = 0; s < s_max; s++) {
-                       if (!(s_enable & (0x1 << s)))
-                               /* skip disabled slice */
-                               continue;
-
-                       for (ss = 0; ss < ss_max; ss++) {
-                               u32 n_disabled;
-
-                               if (ss_disable & (0x1 << ss))
-                                       /* skip disabled subslice */
-                                       continue;
-
-                               n_disabled = hweight8(eu_disable[s] >>
-                                                     (ss * eu_max));
+       if (IS_CHERRYVIEW(dev))
+               cherryview_sseu_info_init(dev);
+       else if (IS_BROADWELL(dev))
+               broadwell_sseu_info_init(dev);
+       else if (INTEL_INFO(dev)->gen >= 9)
+               gen9_sseu_info_init(dev);
 
-                               /*
-                                * Record which subslice(s) has(have) 7 EUs. we
-                                * can tune the hash used to spread work among
-                                * subslices if they are unbalanced.
-                                */
-                               if (eu_max - n_disabled == 7)
-                                       info->subslice_7eu[s] |= 1 << ss;
-
-                               info->eu_total += eu_max - n_disabled;
-                       }
-               }
-
-               /*
-                * SKL is expected to always have a uniform distribution
-                * of EU across subslices with the exception that any one
-                * EU in any one subslice may be fused off for die
-                * recovery.
-               */
-               info->eu_per_subslice = info->subslice_total ?
-                                       DIV_ROUND_UP(info->eu_total,
-                                                    info->subslice_total) : 0;
-               /*
-                * SKL supports slice power gating on devices with more than
-                * one slice, and supports EU power gating on devices with
-                * more than one EU pair per subslice.
-               */
-               info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
-               info->has_subslice_pg = 0;
-               info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
-       }
        DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
        DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
        DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
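
The subtlest part of the refactored code is in broadwell_sseu_info_init()
above: each slice owns a 24-bit EU-disable field (3 subslices x 8 EUs), so the
fields for slices 1 and 2 straddle two 32-bit fuse registers, and
(reg0 >> SHIFT) | ((reg1 & MASK) << (32 - SHIFT)) splices the two halves back
together (the gen9 path instead reads one GEN9_EU_DISABLE register per slice).
A standalone demonstration with hypothetical register values and a 24-bit
field starting at bit 24 of reg0:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const unsigned int shift = 24;  /* field starts at reg0[24] */
                uint32_t reg0 = 0xab000000;     /* field bits  7:0 = 0xab   */
                uint32_t reg1 = 0x0000cdef;     /* field bits 23:8 = 0xcdef */
                uint32_t field = (reg0 >> shift) |
                                 ((reg1 & 0xffff) << (32 - shift));

                printf("0x%06x\n", field);      /* prints 0xcdefab */
                return 0;
        }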
@@ -743,6 +835,24 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
                         info->has_eu_pg ? "y" : "n");
 }
 
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+       if (!IS_VALLEYVIEW(dev_priv))
+               return;
+
+       /*
+        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+        * CHV x1 PHY (DP/HDMI D)
+        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+        */
+       if (IS_CHERRYVIEW(dev_priv)) {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+       } else {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+       }
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
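
intel_init_dpio() only populates a lookup table; the sideband accessors
consume it. Roughly (paraphrased from this kernel's intel_sideband.c, details
elided), a DPIO read resolves pipe -> PHY -> IOSF port through that table:

        u32 vlv_dpio_read(struct drm_i915_private *dev_priv,
                          enum pipe pipe, int reg)
        {
                u32 val = 0;

                /* DPIO_PHY(pipe) picks the PHY; the table filled in by
                 * intel_init_dpio() maps that PHY to its IOSF port. */
                vlv_sideband_rw(dev_priv, DPIO_DEVFN,
                                DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
                                SB_MRD_NP, reg, &val);
                return val;
        }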
@@ -781,8 +891,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
-       mutex_init(&dev_priv->dpio_lock);
+       mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
+       mutex_init(&dev_priv->csr_lock);
+       mutex_init(&dev_priv->av_mutex);
 
        intel_pm_setup(dev);
 
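Of the new mutexes, sb_lock is dpio_lock renamed to match what it actually
guards: all IOSF sideband traffic (punit, nc, DPIO, ...), not just DPIO.
Callers keep the usual pattern, e.g. (register name for illustration, pattern
as used elsewhere in this kernel's VLV code):

        mutex_lock(&dev_priv->sb_lock);         /* serialize IOSF sideband */
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
        mutex_unlock(&dev_priv->sb_lock);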
@@ -828,9 +940,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        intel_uncore_init(dev);
 
+       /* Load CSR Firmware for SKL */
+       intel_csr_ucode_init(dev);
+
        ret = i915_gem_gtt_init(dev);
        if (ret)
-               goto out_regs;
+               goto out_freecsr;
 
        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
@@ -896,8 +1011,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_mtrrfree;
        }
 
-       dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-       if (dev_priv->dp_wq == NULL) {
+       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+       if (dev_priv->hotplug.dp_wq == NULL) {
                DRM_ERROR("Failed to create our dp workqueue.\n");
                ret = -ENOMEM;
                goto out_freewq;
@@ -916,11 +1031,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
-       intel_setup_gmbus(dev);
        intel_opregion_setup(dev);
 
-       intel_setup_bios(dev);
-
        i915_gem_load(dev);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -939,6 +1051,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        intel_device_info_runtime_init(dev);
 
+       intel_init_dpio(dev_priv);
+
        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
@@ -987,12 +1101,11 @@ out_gem_unload:
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
-       intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        pm_qos_remove_request(&dev_priv->pm_qos);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 out_freedpwq:
-       destroy_workqueue(dev_priv->dp_wq);
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
 out_freewq:
        destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
@@ -1000,14 +1113,16 @@ out_mtrrfree:
        io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
        i915_global_gtt_cleanup(dev);
-out_regs:
+out_freecsr:
+       intel_csr_ucode_fini(dev);
        intel_uncore_fini(dev);
        pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
 free_priv:
-       if (dev_priv->slab)
-               kmem_cache_destroy(dev_priv->slab);
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
        kfree(dev_priv);
        return ret;
 }
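
The catch-all dev_priv->slab is gone, replaced by three dedicated caches
(requests, vmas, objects) created in i915_gem_load(); the old
"if (dev_priv->slab)" guard could simply be dropped because
kmem_cache_destroy() accepts NULL, so the error path may run before all three
caches exist. The paired creation, sketched from the GEM side:

        dev_priv->objects =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);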
@@ -1054,6 +1169,10 @@ int i915_driver_unload(struct drm_device *dev)
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }
+       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -1070,17 +1189,19 @@ int i915_driver_unload(struct drm_device *dev)
        /* Flush any outstanding unpin_work. */
        flush_workqueue(dev_priv->wq);
 
+       intel_guc_ucode_fini(dev);
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
-       i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
+       intel_fbc_cleanup_cfb(dev_priv);
        i915_gem_cleanup_stolen(dev);
 
-       intel_teardown_gmbus(dev);
+       intel_csr_ucode_fini(dev);
+
        intel_teardown_mchbar(dev);
 
-       destroy_workqueue(dev_priv->dp_wq);
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1091,9 +1212,9 @@ int i915_driver_unload(struct drm_device *dev)
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
 
-       if (dev_priv->slab)
-               kmem_cache_destroy(dev_priv->slab);
-
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev_priv);
 
@@ -1163,7 +1284,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
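
This is the user-visible half of the i915_setparam() removal above:
I915_SETPARAM now lands in the DRM core's drm_noop(), so even the previously
rejected legacy parameters succeed silently with 0 instead of returning
-ENODEV or adjusting fence state. For reference, drm_noop() in drm_ioctl.c is
just:

        int drm_noop(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
        {
                DRM_DEBUG("\n");
                return 0;
        }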
@@ -1173,51 +1294,41 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
 };
 
 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
-/*
- * This is really ugly: Because old userspace abused the linux agp interface to
- * manage the gtt, we need to claim that all intel devices are agp.  For
- * otherwise the drm core refuses to initialize the agp support code.
- */
-int i915_driver_device_is_agp(struct drm_device *dev)
-{
-       return 1;
-}
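
Two final cleanups: every DRM_UNLOCKED flag disappears from the table because
the DRM core now dispatches driver-private ioctls of DRIVER_MODESET drivers
without taking the legacy drm_global_mutex, which made the flag redundant for
i915; and the AGP shim goes with it, since the core no longer requires Intel
devices to masquerade as AGP. The core-side locking decision, paraphrased from
drm_ioctl():

        if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
            (ioctl->flags & DRM_UNLOCKED))
                retcode = func(dev, kdata, file_priv);
        else {
                mutex_lock(&drm_global_mutex);
                retcode = func(dev, kdata, file_priv);
                mutex_unlock(&drm_global_mutex);
        }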