These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / gpu / drm / vmwgfx / vmwgfx_drv.c
index 620bb5c..24fb348 100644
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  *
  **************************************************************************/
 #include <linux/module.h>
+#include <linux/console.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
 #define DRM_IOCTL_VMW_SYNCCPU                                  \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,             \
                 struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT                  \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,    \
+               struct drm_vmw_context_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
 
 static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
 
        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
 
        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-       VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
 
        /* these allow direct access to the framebuffers mark as master only */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
-                     DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
-                     DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
-                     DRM_MASTER | DRM_UNLOCKED),
+                     DRM_MASTER),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_dmabuf_synccpu_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+                     vmw_extended_context_define_ioctl,
+                     DRM_AUTH | DRM_RENDER_ALLOW),
 };
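
Two things stand out in the rewritten ioctl table: DRM_UNLOCKED disappears from every entry, and the matching DRM_UNLOCKED sanity check is removed from vmw_master_check() in the same patch (see the dropped "Refusing locked ioctl access" hunk below), so the flag is retired table-wide rather than per entry. VMW_EXECBUF additionally loses its handler (NULL) because it is now special-cased in vmw_generic_ioctl() to accept more than one argument size. What remains per entry is a pure permission mask; a minimal sketch of how such a mask is checked, using the same drm_ioctl_permit() call this patch adds for VMW_EXECBUF:

	/* Sketch only: validate the remaining flag bits (DRM_AUTH,
	 * DRM_MASTER, DRM_RENDER_ALLOW) against the calling client;
	 * locking is now entirely the driver's own business. */
	ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
	if (unlikely(ret != 0))
		return ret;
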
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +286,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
                DRM_INFO("  Command Buffers 2.\n");
        if (capabilities & SVGA_CAP_GBOBJECTS)
                DRM_INFO("  Guest Backed Resources.\n");
+       if (capabilities & SVGA_CAP_DX)
+               DRM_INFO("  DX Features.\n");
 }
 
 /**
@@ -296,30 +306,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
        int ret;
-       struct ttm_buffer_object *bo;
+       struct vmw_dma_buffer *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
 
        /*
-        * Create the bo as pinned, so that a tryreserve will
+        * Create the vbo as pinned, so that a tryreserve will
         * immediately succeed. This is because we're the only
         * user of the bo currently.
         */
-       ret = ttm_bo_create(&dev_priv->bdev,
-                           PAGE_SIZE,
-                           ttm_bo_type_device,
-                           &vmw_sys_ne_placement,
-                           0, false, NULL,
-                           &bo);
+       vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+       if (!vbo)
+               return -ENOMEM;
 
+       ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+                             &vmw_sys_ne_placement, false,
+                             &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                return ret;
 
-       ret = ttm_bo_reserve(bo, false, true, false, NULL);
+       ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
        BUG_ON(ret != 0);
+       vmw_bo_pin_reserved(vbo, true);
 
-       ret = ttm_bo_kmap(bo, 0, 1, &map);
+       ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
@@ -327,18 +338,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        }
-       vmw_bo_pin(bo, false);
-       ttm_bo_unreserve(bo);
+       vmw_bo_pin_reserved(vbo, false);
+       ttm_bo_unreserve(&vbo->base);
 
        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
-               ttm_bo_unref(&bo);
+               vmw_dmabuf_unreference(&vbo);
        } else
-               dev_priv->dummy_query_bo = bo;
+               dev_priv->dummy_query_bo = vbo;
 
        return ret;
 }
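
The switch from ttm_bo_create() to a driver-embedded vmw_dma_buffer changes error-path ownership, which is why the failure branch above has no kfree(): vmw_dmabuf_init() hands the allocation to TTM together with the vmw_dmabuf_bo_free destructor, and on initialization failure that destructor, not the caller, is assumed to free it. The shape of the pattern:

	/* Sketch, assuming TTM invokes the supplied destructor on failure. */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;
	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);	/* owns vbo from here on */
	if (unlikely(ret != 0))
		return ret;		/* vbo already freed by the destructor */
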
 
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. These tasks are split out to a separate function
+ * because it reverts vmw_release_device_early and is intended to be used
+ * by an error path in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+       int ret;
+
+       if (dev_priv->has_mob) {
+               ret = vmw_otables_setup(dev_priv);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Unable to initialize "
+                                 "guest Memory OBjects.\n");
+                       return ret;
+               }
+       }
+
+       if (dev_priv->cman) {
+               ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+                                              256*4096, 2*4096);
+               if (ret) {
+                       struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+                       dev_priv->cman = NULL;
+                       vmw_cmdbuf_man_destroy(man);
+               }
+       }
+
+       return 0;
+}
+
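
vmw_request_device_late() is used in two places in this patch: as the tail end of normal bring-up in vmw_request_device() below, and as the recovery step of the hibernation error path. The latter call site, from the vmw_pm_freeze() hunk further down, re-arms what the PM_HIBERNATION_PREPARE notifier's vmw_release_device_early() undid:

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);
	WARN_ON(vmw_request_device_late(dev_priv));
	dev_priv->suspended = false;
	return -EBUSY;
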
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
        int ret;
@@ -349,14 +397,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
-       if (dev_priv->has_mob) {
-               ret = vmw_otables_setup(dev_priv);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Unable to initialize "
-                                 "guest Memory OBjects.\n");
-                       goto out_no_mob;
-               }
+       dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+       if (IS_ERR(dev_priv->cman)) {
+               dev_priv->cman = NULL;
+               dev_priv->has_dx = false;
        }
+
+       ret = vmw_request_device_late(dev_priv);
+       if (ret)
+               goto out_no_mob;
+
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
@@ -364,15 +414,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
        return 0;
 
 out_no_query_bo:
-       if (dev_priv->has_mob)
+       if (dev_priv->cman)
+               vmw_cmdbuf_remove_pool(dev_priv->cman);
+       if (dev_priv->has_mob) {
+               (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
+       }
+       if (dev_priv->cman)
+               vmw_cmdbuf_man_destroy(dev_priv->cman);
 out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
 }
 
-static void vmw_release_device(struct vmw_private *dev_priv)
+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+static void vmw_release_device_early(struct vmw_private *dev_priv)
 {
        /*
         * Previous destructions should've released
@@ -381,65 +445,31 @@ static void vmw_release_device(struct vmw_private *dev_priv)
 
        BUG_ON(dev_priv->pinned_bo != NULL);
 
-       ttm_bo_unref(&dev_priv->dummy_query_bo);
-       if (dev_priv->has_mob)
-               vmw_otables_takedown(dev_priv);
-       vmw_fence_fifo_down(dev_priv->fman);
-       vmw_fifo_release(dev_priv, &dev_priv->fifo);
-}
-
+       vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+       if (dev_priv->cman)
+               vmw_cmdbuf_remove_pool(dev_priv->cman);
 
-/**
- * Increase the 3d resource refcount.
- * If the count was previously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
-                       bool unhide_svga)
-{
-       int ret = 0;
-
-       mutex_lock(&dev_priv->release_mutex);
-       if (unlikely(dev_priv->num_3d_resources++ == 0)) {
-               ret = vmw_request_device(dev_priv);
-               if (unlikely(ret != 0))
-                       --dev_priv->num_3d_resources;
-       } else if (unhide_svga) {
-               vmw_write(dev_priv, SVGA_REG_ENABLE,
-                         vmw_read(dev_priv, SVGA_REG_ENABLE) &
-                         ~SVGA_REG_ENABLE_HIDE);
+       if (dev_priv->has_mob) {
+               ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+               vmw_otables_takedown(dev_priv);
        }
-
-       mutex_unlock(&dev_priv->release_mutex);
-       return ret;
 }
 
 /**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
  */
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
-                        bool hide_svga)
+static void vmw_release_device_late(struct vmw_private *dev_priv)
 {
-       int32_t n3d;
-
-       mutex_lock(&dev_priv->release_mutex);
-       if (unlikely(--dev_priv->num_3d_resources == 0))
-               vmw_release_device(dev_priv);
-       else if (hide_svga)
-               vmw_write(dev_priv, SVGA_REG_ENABLE,
-                         vmw_read(dev_priv, SVGA_REG_ENABLE) |
-                         SVGA_REG_ENABLE_HIDE);
-
-       n3d = (int32_t) dev_priv->num_3d_resources;
-       mutex_unlock(&dev_priv->release_mutex);
+       vmw_fence_fifo_down(dev_priv->fman);
+       if (dev_priv->cman)
+               vmw_cmdbuf_man_destroy(dev_priv->cman);
 
-       BUG_ON(n3d < 0);
+       vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
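
Splitting the old vmw_release_device() into early and late halves exists so that TTM buffer management can be torn down in between, while fencing and command submission are still alive to process the resulting evictions. The ordering this enables, as used by vmw_driver_unload() later in this patch:

	vmw_release_device_early(dev_priv);	/* unpin query bo, drop cmdbuf pool, otables */
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);	/* fence fifo down, cman destroy, fifo release */
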
 
 /**
@@ -603,6 +633,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        spin_lock_init(&dev_priv->hw_lock);
        spin_lock_init(&dev_priv->waiter_lock);
        spin_lock_init(&dev_priv->cap_lock);
+       spin_lock_init(&dev_priv->svga_lock);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
@@ -613,7 +644,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
-       atomic_set(&dev_priv->fifo_queue_waiters, 0);
+       dev_priv->fifo_queue_waiters = 0;
 
        dev_priv->used_memory_size = 0;
 
@@ -673,22 +704,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
                dev_priv->max_mob_size =
                        vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
-       } else
+               dev_priv->stdu_max_width =
+                       vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+               dev_priv->stdu_max_height =
+                       vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+                         SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+               dev_priv->texture_max_width = vmw_read(dev_priv,
+                                                      SVGA_REG_DEV_CAP);
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+                         SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+               dev_priv->texture_max_height = vmw_read(dev_priv,
+                                                       SVGA_REG_DEV_CAP);
+       } else {
+               dev_priv->texture_max_width = 8192;
+               dev_priv->texture_max_height = 8192;
                dev_priv->prim_bb_mem = dev_priv->vram_size;
+       }
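
Device capabilities are read through an index/value register pair: write the capability index to SVGA_REG_DEV_CAP, then read the same register back for the value, as done above for the maximum texture dimensions. The DX probe later in this patch uses the identical idiom, wrapped in cap_lock presumably so concurrent readers cannot interleave their index/value accesses:

	spin_lock(&dev_priv->cap_lock);
	vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
	dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
	spin_unlock(&dev_priv->cap_lock);
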
+
+       vmw_print_capabilities(dev_priv->capabilities);
 
        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;
 
-       /*
-        * Limit back buffer size to VRAM size.  Remove this once
-        * screen targets are implemented.
-        */
-       if (dev_priv->prim_bb_mem > dev_priv->vram_size)
-               dev_priv->prim_bb_mem = dev_priv->vram_size;
-
-       vmw_print_capabilities(dev_priv->capabilities);
-
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
@@ -713,23 +753,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;
 
-
-       ret = ttm_bo_device_init(&dev_priv->bdev,
-                                dev_priv->bo_global_ref.ref.object,
-                                &vmw_bo_driver,
-                                dev->anon_inode->i_mapping,
-                                VMWGFX_FILE_PAGE_OFFSET,
-                                false);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed initializing TTM buffer object driver.\n");
-               goto out_err1;
-       }
-
-       dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
-                                              dev_priv->mmio_size);
-
-       dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
-                                        dev_priv->mmio_size);
+       dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
+                                      dev_priv->mmio_size, MEMREMAP_WB);
 
        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
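
Replacing ioremap_wc() with memremap(..., MEMREMAP_WB) treats the FIFO/MMIO range as ordinary write-back memory and returns a plain pointer, so the write-combining MTRR bookkeeping (arch_phys_wc_add/arch_phys_wc_del and mmio_mtrr) disappears and the matching release becomes memunmap() instead of iounmap(), as the unload and error paths below reflect:

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);
	if (!dev_priv->mmio_virt)
		return -ENOMEM;	/* sketch; the real code jumps to an error label */
	/* ... use the mapping ... */
	memunmap(dev_priv->mmio_virt);	/* teardown counterpart */
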
@@ -787,13 +812,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_no_fman;
        }
 
+       ret = ttm_bo_device_init(&dev_priv->bdev,
+                                dev_priv->bo_global_ref.ref.object,
+                                &vmw_bo_driver,
+                                dev->anon_inode->i_mapping,
+                                VMWGFX_FILE_PAGE_OFFSET,
+                                false);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+               goto out_no_bdev;
+       }
 
+       /*
+        * Enable VRAM, but initially don't use it until SVGA is enabled and
+        * unhidden.
+        */
        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_no_vram;
        }
+       dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 
        dev_priv->has_gmr = true;
        if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +854,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                }
        }
 
-       vmw_kms_save_vga(dev_priv);
+       if (dev_priv->has_mob) {
+               spin_lock(&dev_priv->cap_lock);
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+               dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+               spin_unlock(&dev_priv->cap_lock);
+       }
+
 
-       /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);
 
+       ret = vmw_request_device(dev_priv);
+       if (ret)
+               goto out_no_fifo;
+
+       DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
        if (dev_priv->enable_fb) {
-               ret = vmw_3d_resource_inc(dev_priv, true);
-               if (unlikely(ret != 0))
-                       goto out_no_fifo;
+               vmw_fifo_resource_inc(dev_priv);
+               vmw_svga_enable(dev_priv);
                vmw_fb_init(dev_priv);
        }
 
@@ -838,13 +888,14 @@ out_no_fifo:
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
 out_no_kms:
-       vmw_kms_restore_vga(dev_priv);
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_no_vram:
+       (void)ttm_bo_device_release(&dev_priv->bdev);
+out_no_bdev:
        vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -857,16 +908,15 @@ out_no_irq:
 out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
 out_err4:
-       iounmap(dev_priv->mmio_virt);
+       memunmap(dev_priv->mmio_virt);
 out_err3:
-       arch_phys_wc_del(dev_priv->mmio_mtrr);
-       (void)ttm_bo_device_release(&dev_priv->bdev);
-out_err1:
        vmw_ttm_global_release(dev_priv);
 out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);
 
+       if (dev_priv->ctx.staged_bindings)
+               vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        kfree(dev_priv);
        return ret;
 }
@@ -882,19 +932,24 @@ static int vmw_driver_unload(struct drm_device *dev)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
+               vmw_fb_off(dev_priv);
                vmw_fb_close(dev_priv);
-               vmw_kms_restore_vga(dev_priv);
-               vmw_3d_resource_dec(dev_priv, false);
+               vmw_fifo_resource_dec(dev_priv);
+               vmw_svga_disable(dev_priv);
        }
+
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
 
-       if (dev_priv->has_mob)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 
+       vmw_release_device_early(dev_priv);
+       if (dev_priv->has_mob)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+       (void) ttm_bo_device_release(&dev_priv->bdev);
+       vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
@@ -904,9 +959,9 @@ static int vmw_driver_unload(struct drm_device *dev)
                pci_release_regions(dev->pdev);
 
        ttm_object_device_release(&dev_priv->tdev);
-       iounmap(dev_priv->mmio_virt);
-       arch_phys_wc_del(dev_priv->mmio_mtrr);
-       (void)ttm_bo_device_release(&dev_priv->bdev);
+       memunmap(dev_priv->mmio_virt);
+       if (dev_priv->ctx.staged_bindings)
+               vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        vmw_ttm_global_release(dev_priv);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -992,24 +1047,21 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
        }
 
        /*
-        * Check if we were previously master, but now dropped.
+        * Check if we were previously master, but now dropped. In that
+        * case, allow at least render node functionality.
         */
        if (vmw_fp->locked_master) {
                mutex_unlock(&dev->master_mutex);
+
+               if (flags & DRM_RENDER_ALLOW)
+                       return NULL;
+
                DRM_ERROR("Dropped master trying to access ioctl that "
                          "requires authentication.\n");
                return ERR_PTR(-EACCES);
        }
        mutex_unlock(&dev->master_mutex);
 
-       /*
-        * Taking the drm_global_mutex after the TTM lock might deadlock
-        */
-       if (!(flags & DRM_UNLOCKED)) {
-               DRM_ERROR("Refusing locked ioctl access.\n");
-               return ERR_PTR(-EDEADLK);
-       }
-
        /*
         * Take the TTM lock. Possibly sleep waiting for the authenticating
         * master to become master again, or for a SIGTERM if the
@@ -1044,17 +1096,27 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                const struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-               if (unlikely(ioctl->cmd != cmd)) {
-                       DRM_ERROR("Invalid command format, ioctl %d\n",
-                                 nr - DRM_COMMAND_BASE);
-                       return -EINVAL;
+               if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+                       ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+                       if (unlikely(ret != 0))
+                               return ret;
+
+                       if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+                               goto out_io_encoding;
+
+                       return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+                                                       _IOC_SIZE(cmd));
                }
+
+               if (unlikely(ioctl->cmd != cmd))
+                       goto out_io_encoding;
+
                flags = ioctl->flags;
        } else if (!drm_ioctl_flags(nr, &flags))
                return -EINVAL;
 
        vmaster = vmw_master_check(dev, file_priv, flags);
-       if (unlikely(IS_ERR(vmaster))) {
+       if (IS_ERR(vmaster)) {
                ret = PTR_ERR(vmaster);
 
                if (ret != -ERESTARTSYS)
@@ -1068,6 +1130,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                ttm_read_unlock(&vmaster->lock);
 
        return ret;
+
+out_io_encoding:
+       DRM_ERROR("Invalid command format, ioctl %d\n",
+                 nr - DRM_COMMAND_BASE);
+
+       return -EINVAL;
 }
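
The VMW_EXECBUF special case above deliberately skips the exact ioctl->cmd match every other command gets: only the transfer direction is validated, and the caller's encoded size is forwarded, presumably so userspace compiled against an older, smaller struct drm_vmw_execbuf_arg keeps working. In ioctl-encoding terms:

	/* cmd was built by userspace as _IOW(...):
	 *   (cmd & (IOC_IN | IOC_OUT)) == IOC_IN  ->  write-only argument
	 *   _IOC_SIZE(cmd)                        ->  size userspace encoded,
	 * which vmw_execbuf_ioctl() receives instead of a hardcoded size. */
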
 
 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1154,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
 
 static void vmw_lastclose(struct drm_device *dev)
 {
-       struct drm_crtc *crtc;
-       struct drm_mode_set set;
-       int ret;
-
-       set.x = 0;
-       set.y = 0;
-       set.fb = NULL;
-       set.mode = NULL;
-       set.connectors = NULL;
-       set.num_connectors = 0;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               set.crtc = crtc;
-               ret = drm_mode_set_config_internal(&set);
-               WARN_ON(ret != 0);
-       }
-
 }
 
 static void vmw_master_init(struct vmw_master *vmaster)
 {
        ttm_lock_init(&vmaster->lock);
-       INIT_LIST_HEAD(&vmaster->fb_surf);
-       mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1186,6 @@ static void vmw_master_destroy(struct drm_device *dev,
        kfree(vmaster);
 }
 
-
 static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
@@ -1148,27 +1196,13 @@ static int vmw_master_set(struct drm_device *dev,
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;
 
-       if (!dev_priv->enable_fb) {
-               ret = vmw_3d_resource_inc(dev_priv, true);
-               if (unlikely(ret != 0))
-                       return ret;
-               vmw_kms_save_vga(dev_priv);
-               vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-       }
-
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
-                       goto out_no_active_lock;
+                       return ret;
 
                ttm_lock_set_kill(&active->lock, true, SIGTERM);
-               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Unable to clean VRAM on "
-                                 "master drop.\n");
-               }
-
                dev_priv->active_master = NULL;
        }
 
@@ -1182,14 +1216,6 @@ static int vmw_master_set(struct drm_device *dev,
        dev_priv->active_master = vmaster;
 
        return 0;
-
-out_no_active_lock:
-       if (!dev_priv->enable_fb) {
-               vmw_kms_restore_vga(dev_priv);
-               vmw_3d_resource_dec(dev_priv, true);
-               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-       }
-       return ret;
 }
 
 static void vmw_master_drop(struct drm_device *dev,
@@ -1208,22 +1234,16 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+       vmw_kms_legacy_hotspot_clear(dev_priv);
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }
 
        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-       vmw_execbuf_release_pinned_bo(dev_priv);
 
-       if (!dev_priv->enable_fb) {
-               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-               if (unlikely(ret != 0))
-                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
-               vmw_kms_restore_vga(dev_priv);
-               vmw_3d_resource_dec(dev_priv, true);
-               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-       }
+       if (!dev_priv->enable_fb)
+               vmw_svga_disable(dev_priv);
 
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1253,76 @@ static void vmw_master_drop(struct drm_device *dev,
                vmw_fb_on(dev_priv);
 }
 
+/**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
+{
+       spin_lock(&dev_priv->svga_lock);
+       if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+               vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+               dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+       }
+       spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+void vmw_svga_enable(struct vmw_private *dev_priv)
+{
+       ttm_read_lock(&dev_priv->reservation_sem, false);
+       __vmw_svga_enable(dev_priv);
+       ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by caller.
+ */
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
+{
+       spin_lock(&dev_priv->svga_lock);
+       if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+               dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+               vmw_write(dev_priv, SVGA_REG_ENABLE,
+                         SVGA_REG_ENABLE_HIDE |
+                         SVGA_REG_ENABLE_ENABLE);
+       }
+       spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+void vmw_svga_disable(struct vmw_private *dev_priv)
+{
+       ttm_write_lock(&dev_priv->reservation_sem, false);
+       spin_lock(&dev_priv->svga_lock);
+       if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+               dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+               spin_unlock(&dev_priv->svga_lock);
+               if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+                       DRM_ERROR("Failed evicting VRAM buffers.\n");
+               vmw_write(dev_priv, SVGA_REG_ENABLE,
+                         SVGA_REG_ENABLE_HIDE |
+                         SVGA_REG_ENABLE_ENABLE);
+       } else
+               spin_unlock(&dev_priv->svga_lock);
+       ttm_write_unlock(&dev_priv->reservation_sem);
+}
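
Worth noting in vmw_svga_disable(): svga_lock is dropped before ttm_bo_evict_mm(), since eviction can wait on buffer moves and sleeping under a spinlock is not allowed; clearing use_type first, under the lock, keeps new VRAM placements from racing in while eviction runs. The general shape of the pattern (hypothetical names, illustration only):

	spin_lock(&state_lock);
	if (enabled) {
		enabled = false;		/* publish the disabled state first */
		spin_unlock(&state_lock);	/* cannot hold a spinlock across a sleep */
		drain();			/* sleeping call, here ttm_bo_evict_mm() */
	} else {
		spin_unlock(&state_lock);
	}
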
 
 static void vmw_remove(struct pci_dev *pdev)
 {
@@ -1250,23 +1340,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 
        switch (val) {
        case PM_HIBERNATION_PREPARE:
-       case PM_SUSPEND_PREPARE:
+               if (dev_priv->enable_fb)
+                       vmw_fb_off(dev_priv);
                ttm_suspend_lock(&dev_priv->reservation_sem);
 
-               /**
+               /*
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents is moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv);
                vmw_resource_evict_all(dev_priv);
+               vmw_release_device_early(dev_priv);
                ttm_bo_swapout_all(&dev_priv->bdev);
-
+               vmw_fence_fifo_down(dev_priv->fman);
                break;
        case PM_POST_HIBERNATION:
-       case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
+               vmw_fence_fifo_up(dev_priv->fman);
                ttm_suspend_unlock(&dev_priv->reservation_sem);
-
+               if (dev_priv->enable_fb)
+                       vmw_fb_on(dev_priv);
                break;
        case PM_RESTORE_PREPARE:
                break;
@@ -1276,20 +1369,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
        return 0;
 }
 
-/**
- * These might not be needed with the virtual SVGA device.
- */
-
 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       if (dev_priv->num_3d_resources != 0) {
-               DRM_INFO("Can't suspend or hibernate "
-                        "while 3D resources are active.\n");
+       if (dev_priv->refuse_hibernation)
                return -EBUSY;
-       }
 
        pci_save_state(pdev);
        pci_disable_device(pdev);
@@ -1321,56 +1407,62 @@ static int vmw_pm_resume(struct device *kdev)
        return vmw_pci_resume(pdev);
 }
 
-static int vmw_pm_prepare(struct device *kdev)
+static int vmw_pm_freeze(struct device *kdev)
 {
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       /**
-        * Release 3d reference held by fbdev and potentially
-        * stop fifo.
-        */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
-                       vmw_3d_resource_dec(dev_priv, true);
-
-       if (dev_priv->num_3d_resources != 0) {
-
-               DRM_INFO("Can't suspend or hibernate "
-                        "while 3D resources are active.\n");
+               vmw_fifo_resource_dec(dev_priv);
 
+       if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+               DRM_ERROR("Can't hibernate while 3D resources are active.\n");
                if (dev_priv->enable_fb)
-                       vmw_3d_resource_inc(dev_priv, true);
+                       vmw_fifo_resource_inc(dev_priv);
+               WARN_ON(vmw_request_device_late(dev_priv));
                dev_priv->suspended = false;
                return -EBUSY;
        }
 
+       if (dev_priv->enable_fb)
+               __vmw_svga_disable(dev_priv);
+
+       vmw_release_device_late(dev_priv);
+
        return 0;
 }
 
-static void vmw_pm_complete(struct device *kdev)
+static int vmw_pm_restore(struct device *kdev)
 {
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
+       int ret;
 
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);
 
-       /**
-        * Reclaim 3d reference held by fbdev and potentially
-        * start fifo.
-        */
        if (dev_priv->enable_fb)
-                       vmw_3d_resource_inc(dev_priv, false);
+               vmw_fifo_resource_inc(dev_priv);
+
+       ret = vmw_request_device(dev_priv);
+       if (ret)
+               return ret;
+
+       if (dev_priv->enable_fb)
+               __vmw_svga_enable(dev_priv);
 
        dev_priv->suspended = false;
+
+       return 0;
 }
 
 static const struct dev_pm_ops vmw_pm_ops = {
-       .prepare = vmw_pm_prepare,
-       .complete = vmw_pm_complete,
+       .freeze = vmw_pm_freeze,
+       .thaw = vmw_pm_restore,
+       .restore = vmw_pm_restore,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
 };
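
The prepare/complete pair gives way to hibernation-specific callbacks. Under the dev_pm_ops contract (assumed here, not part of the patch): .freeze runs before the hibernation image is written, .thaw runs in the same kernel once the image is out, and .restore runs in the freshly booted kernel after the image is loaded. Pointing both .thaw and .restore at vmw_pm_restore works because vmw_pm_freeze tears the device down completely either way, so the same bring-up sequence applies:

	/* hibernate: vmw_pm_freeze() -> image written -> vmw_pm_restore() (.thaw)
	 * resume:    boot restore kernel -> image read -> vmw_pm_restore() (.restore) */
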
@@ -1447,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 static int __init vmwgfx_init(void)
 {
        int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force())
+               return -EINVAL;
+#endif
+
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");