These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/drivers/gpu/drm/i915/i915_drv.c
index fb91df1..a6ad938 100644
@@ -356,26 +356,39 @@ static const struct intel_device_info intel_cherryview_info = {
 };
 
 static const struct intel_device_info intel_skylake_info = {
-       .is_preliminary = 1,
        .is_skylake = 1,
        .gen = 9, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
+       .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-       .is_preliminary = 1,
        .is_skylake = 1,
        .gen = 9, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
+       .has_fpga_dbg = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+       .is_preliminary = 1,
+       .gen = 9,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .num_pipes = 3,
+       .has_ddi = 1,
+       .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
@@ -420,16 +433,43 @@ static const struct intel_device_info intel_skylake_gt3_info = {
        INTEL_CHV_IDS(&intel_cherryview_info),  \
        INTEL_SKL_GT1_IDS(&intel_skylake_info), \
        INTEL_SKL_GT2_IDS(&intel_skylake_info), \
-       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info)      \
+       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),     \
+       INTEL_BXT_IDS(&intel_broxton_info)
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
        INTEL_PCI_IDS,
        {0, 0, 0}
 };
 
-#if defined(CONFIG_DRM_I915_KMS)
 MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
+
+static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+{
+       enum intel_pch ret = PCH_NOP;
+
+       /*
+        * In a virtualized passthrough environment we can be in a
+        * setup where the ISA bridge is not able to be passed through.
+        * In this case, a south bridge can be emulated and we have to
+        * make an educated guess as to which PCH is really there.
+        */
+
+       if (IS_GEN5(dev)) {
+               ret = PCH_IBX;
+               DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
+       } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+               ret = PCH_CPT;
+               DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+               ret = PCH_LPT;
+               DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
+       } else if (IS_SKYLAKE(dev)) {
+               ret = PCH_SPT;
+               DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+       }
+
+       return ret;
+}
 
 void intel_detect_pch(struct drm_device *dev)
 {
@@ -491,6 +531,11 @@ void intel_detect_pch(struct drm_device *dev)
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev));
+                       } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                                   pch->subsystem_vendor == 0x1af4 &&
+                                   pch->subsystem_device == 0x1100)) {
+                               dev_priv->pch_type = intel_virt_detect_pch(dev);
                        } else
                                continue;
 
@@ -528,19 +573,24 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
        return true;
 }
 
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+void i915_firmware_load_error_print(const char *fw_path, int err)
 {
-       spin_lock_irq(&dev_priv->irq_lock);
+       DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
 
-       dev_priv->long_hpd_port_mask = 0;
-       dev_priv->short_hpd_port_mask = 0;
-       dev_priv->hpd_event_bits = 0;
+       /*
+        * If the reason is not known, assume -ENOENT since that's the most
+        * common failure mode.
+        */
+       if (!err)
+               err = -ENOENT;
 
-       spin_unlock_irq(&dev_priv->irq_lock);
+       if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
+               return;
 
-       cancel_work_sync(&dev_priv->dig_port_work);
-       cancel_work_sync(&dev_priv->hotplug_work);
-       cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+       DRM_ERROR(
+         "The driver is built-in, so to load the firmware you need to\n"
+         "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
+         "in your initrd/initramfs image.\n");
 }
 
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
@@ -561,11 +611,13 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                              bool rpm_resume);
+static int skl_resume_prepare(struct drm_i915_private *dev_priv);
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+
 
 static int i915_drm_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc;
        pci_power_t opregion_target_state;
        int error;
 
@@ -589,6 +641,8 @@ static int i915_drm_suspend(struct drm_device *dev)
                return error;
        }
 
+       intel_guc_suspend(dev);
+
        intel_suspend_gt_powersave(dev);
 
        /*
@@ -596,8 +650,7 @@ static int i915_drm_suspend(struct drm_device *dev)
         * for _thaw. Also, power gate the CRTC power wells.
         */
        drm_modeset_lock_all(dev);
-       for_each_crtc(dev, crtc)
-               intel_crtc_control(crtc, false);
+       intel_display_suspend(dev);
        drm_modeset_unlock_all(dev);
 
        intel_dp_mst_suspend(dev);
@@ -664,7 +717,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
        return 0;
 }
 
-int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 {
        int error;
 
@@ -715,10 +768,12 @@ static int i915_drm_resume(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-               atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+               atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
        }
        mutex_unlock(&dev->struct_mutex);
 
+       intel_guc_resume(dev);
+
        intel_modeset_init_hw(dev);
 
        spin_lock_irq(&dev_priv->irq_lock);
@@ -727,7 +782,7 @@ static int i915_drm_resume(struct drm_device *dev)
        spin_unlock_irq(&dev_priv->irq_lock);
 
        drm_modeset_lock_all(dev);
-       intel_modeset_setup_hw_state(dev, true);
+       intel_display_resume(dev);
        drm_modeset_unlock_all(dev);
 
        intel_dp_mst_resume(dev);
@@ -779,11 +834,16 @@ static int i915_drm_resume_early(struct drm_device *dev)
        if (IS_VALLEYVIEW(dev_priv))
                ret = vlv_resume_prepare(dev_priv, false);
        if (ret)
-               DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+               DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
+                         ret);
 
        intel_uncore_early_sanitize(dev, true);
 
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+       if (IS_BROXTON(dev))
+               ret = bxt_resume_prepare(dev_priv);
+       else if (IS_SKYLAKE(dev_priv))
+               ret = skl_resume_prepare(dev_priv);
+       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_disable_pc8(dev_priv);
 
        intel_uncore_sanitize(dev);
@@ -792,7 +852,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
        return ret;
 }
 
-int i915_resume_legacy(struct drm_device *dev)
+int i915_resume_switcheroo(struct drm_device *dev)
 {
        int ret;
 
@@ -827,9 +887,6 @@ int i915_reset(struct drm_device *dev)
        bool simulated;
        int ret;
 
-       if (!i915.reset)
-               return 0;
-
        intel_reset_gt_powersave(dev);
 
        mutex_lock(&dev->struct_mutex);
@@ -921,8 +978,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;
 
-       driver.driver_features &= ~(DRIVER_USE_AGP);
-
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -955,7 +1010,7 @@ static int i915_pm_suspend_late(struct device *dev)
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;
 
        /*
-        * We have a suspedn ordering issue with the snd-hda driver also
+        * We have a suspend ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with a late
         * suspend hook.
@@ -999,6 +1054,15 @@ static int i915_pm_resume(struct device *dev)
        return i915_drm_resume(drm_dev);
 }
 
+static int skl_suspend_complete(struct drm_i915_private *dev_priv)
+{
+       /* Enabling DC6 is not a hard requirement to enter runtime D3 */
+
+       skl_uninit_cdclk(dev_priv);
+
+       return 0;
+}
+
 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
 {
        hsw_enable_pc8(dev_priv);
@@ -1006,6 +1070,48 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
        return 0;
 }
 
+static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       /* TODO: when DC5 support is added disable DC5 here. */
+
+       broxton_ddi_phy_uninit(dev);
+       broxton_uninit_cdclk(dev);
+       bxt_enable_dc9(dev_priv);
+
+       return 0;
+}
+
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       /* TODO: when CSR FW support is added make sure the FW is loaded */
+
+       bxt_disable_dc9(dev_priv);
+
+       /*
+        * TODO: when DC5 support is added enable DC5 here if the CSR FW
+        * is available.
+        */
+       broxton_init_cdclk(dev);
+       broxton_ddi_phy_init(dev);
+       intel_prepare_ddi(dev);
+
+       return 0;
+}
+
+static int skl_resume_prepare(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       skl_init_cdclk(dev_priv);
+       intel_csr_load_program(dev);
+
+       return 0;
+}
+
 /*
  * Save all Gunit registers that may be lost after a D3 and a subsequent
  * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1045,7 +1151,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        s->gfx_pend_tlb1        = I915_READ(GEN7_GFX_PEND_TLB1);
 
        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
-               s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+               s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
 
        s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
        s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
@@ -1089,7 +1195,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        s->pm_ier               = I915_READ(GEN6_PMIER);
 
        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
-               s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
+               s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
 
        /* GT SA CZ domain, 0x100000-0x138124 */
        s->tilectl              = I915_READ(TILECTL);
@@ -1127,7 +1233,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN7_GFX_PEND_TLB1,  s->gfx_pend_tlb1);
 
        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
-               I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+               I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
 
        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
        I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
@@ -1171,7 +1277,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_PMIER,          s->pm_ier);
 
        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
-               I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
+               I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
 
        /* GT SA CZ domain, 0x100000-0x138124 */
        I915_WRITE(TILECTL,                     s->tilectl);
@@ -1401,6 +1507,8 @@ static int intel_runtime_suspend(struct device *device)
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
+       intel_guc_suspend(dev);
+
        intel_suspend_gt_powersave(dev);
        intel_runtime_pm_disable_interrupts(dev_priv);
 
@@ -1420,7 +1528,15 @@ static int intel_runtime_suspend(struct device *device)
         * FIXME: We really should find a document that references the arguments
         * used below!
         */
-       if (IS_HASWELL(dev)) {
+       if (IS_BROADWELL(dev)) {
+               /*
+                * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+                * being detected, and the call we do at intel_runtime_resume()
+                * won't be able to restore them. Since PCI_D3hot matches the
+                * actual specification and appears to be working, use it.
+                */
+               intel_opregion_notify_adapter(dev, PCI_D3hot);
+       } else {
                /*
                 * current versions of firmware which depend on this opregion
                 * notification have repurposed the D1 definition to mean
@@ -1429,16 +1545,6 @@ static int intel_runtime_suspend(struct device *device)
                 * the suspend path.
                 */
                intel_opregion_notify_adapter(dev, PCI_D1);
-       } else {
-               /*
-                * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-                * being detected, and the call we do at intel_runtime_resume()
-                * won't be able to restore them. Since PCI_D3hot matches the
-                * actual specification and appears to be working, use it. Let's
-                * assume the other non-Haswell platforms will stay the same as
-                * Broadwell.
-                */
-               intel_opregion_notify_adapter(dev, PCI_D3hot);
        }
 
        assert_forcewakes_inactive(dev_priv);
@@ -1462,8 +1568,15 @@ static int intel_runtime_resume(struct device *device)
        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;
 
+       intel_guc_resume(dev);
+
        if (IS_GEN6(dev_priv))
                intel_init_pch_refclk(dev);
+
+       if (IS_BROXTON(dev))
+               ret = bxt_resume_prepare(dev_priv);
+       else if (IS_SKYLAKE(dev))
+               ret = skl_resume_prepare(dev_priv);
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_disable_pc8(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv))
@@ -1477,6 +1590,15 @@ static int intel_runtime_resume(struct device *device)
        gen6_update_ring_freq(dev);
 
        intel_runtime_pm_enable_interrupts(dev_priv);
+
+       /*
+        * On VLV/CHV display interrupts are part of the display
+        * power well, so hpd is reinitialized from there. For
+        * everyone else do it here.
+        */
+       if (!IS_VALLEYVIEW(dev_priv))
+               intel_hpd_init(dev_priv);
+
        intel_enable_gt_powersave(dev);
 
        if (ret)
@@ -1493,12 +1615,15 @@ static int intel_runtime_resume(struct device *device)
  */
 static int intel_suspend_complete(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
        int ret;
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+       if (IS_BROXTON(dev_priv))
+               ret = bxt_suspend_complete(dev_priv);
+       else if (IS_SKYLAKE(dev_priv))
+               ret = skl_suspend_complete(dev_priv);
+       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                ret = hsw_suspend_complete(dev_priv);
-       else if (IS_VALLEYVIEW(dev))
+       else if (IS_VALLEYVIEW(dev_priv))
                ret = vlv_suspend_complete(dev_priv);
        else
                ret = 0;
@@ -1570,9 +1695,8 @@ static struct drm_driver driver = {
         * deal with them for Intel hardware.
         */
        .driver_features =
-           DRIVER_USE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
-           DRIVER_RENDER,
+           DRIVER_RENDER | DRIVER_MODESET,
        .load = i915_driver_load,
        .unload = i915_driver_unload,
        .open = i915_driver_open,
@@ -1581,11 +1705,6 @@ static struct drm_driver driver = {
        .postclose = i915_driver_postclose,
        .set_busid = drm_pci_set_busid,
 
-       /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-       .suspend = i915_suspend_legacy,
-       .resume = i915_resume_legacy,
-
-       .device_is_agp = i915_driver_device_is_agp,
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = i915_debugfs_init,
        .debugfs_cleanup = i915_debugfs_cleanup,
@@ -1624,20 +1743,13 @@ static int __init i915_init(void)
        driver.num_ioctls = i915_max_ioctl;
 
        /*
-        * If CONFIG_DRM_I915_KMS is set, default to KMS unless
-        * explicitly disabled with the module pararmeter.
-        *
-        * Otherwise, just follow the parameter (defaulting to off).
-        *
-        * Allow optional vga_text_mode_force boot option to override
-        * the default behavior.
+        * Enable KMS by default, unless explicitly overridden by
+        * either the i915.modeset parameter or by the
+        * vga_text_mode_force boot option.
         */
-#if defined(CONFIG_DRM_I915_KMS)
-       if (i915.modeset != 0)
-               driver.driver_features |= DRIVER_MODESET;
-#endif
-       if (i915.modeset == 1)
-               driver.driver_features |= DRIVER_MODESET;
+
+       if (i915.modeset == 0)
+               driver.driver_features &= ~DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
        if (vgacon_text_force() && i915.modeset == -1)
@@ -1645,17 +1757,11 @@ static int __init i915_init(void)
 #endif
 
        if (!(driver.driver_features & DRIVER_MODESET)) {
-               driver.get_vblank_timestamp = NULL;
                /* Silently fail loading to not upset userspace. */
                DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
                return 0;
        }
 
-       /*
-        * FIXME: Note that we're lying to the DRM core here so that we can get access
-        * to the atomic ioctl and the atomic properties.  Only plane operations on
-        * a single CRTC will actually work.
-        */
        if (i915.nuclear_pageflip)
                driver.driver_features |= DRIVER_ATOMIC;