These changes are the raw update to the linux-4.4.6-rt14 kernel sources (kvmfornfv.git).
diff --git a/kernel/drivers/gpu/drm/drm_gem.c b/kernel/drivers/gpu/drm/drm_gem.c
index 16a1647..c7de454 100644
--- a/kernel/drivers/gpu/drm/drm_gem.c
+++ b/kernel/drivers/gpu/drm/drm_gem.c
@@ -491,7 +491,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
-               BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
+               BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                                (page_to_pfn(p) >= 0x00100000UL));
        }
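The new helper is a tidier spelling of the same test, not a behaviour change: mapping_gfp_constraint() masks the mapping's gfp flags with the caller's constraint, so the BUG_ON still fires only when the mapping allows __GFP_DMA32 yet shmem handed back a page at or above the 4 GiB boundary (pfn 0x00100000 with 4 KiB pages). A sketch of the helper as it reads in the 4.4-era include/linux/pagemap.h:

        /* Restricts the given gfp_mask to what the mapping allows. */
        static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                                                   gfp_t gfp_mask)
        {
                return mapping_gfp_mask(mapping) & gfp_mask;
        }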
 
@@ -763,10 +763,11 @@ EXPORT_SYMBOL(drm_gem_object_release);
 void
 drm_gem_object_free(struct kref *kref)
 {
-       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+       struct drm_gem_object *obj =
+               container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;
 
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
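Two fixes in this hunk: the old pointer cast only worked because refcount happens to be the first member of struct drm_gem_object, whereas container_of() recovers the enclosing object no matter where the member sits; and the locking assert is downgraded from BUG_ON (halts the machine) to WARN_ON (logs a backtrace and carries on). A simplified sketch of the container_of() idea, minus the typeof-based type check in include/linux/kernel.h:

        /* Given a pointer to 'member' embedded in 'type', step back
         * to the start of the enclosing structure. */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))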
@@ -778,22 +779,14 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
 
        drm_gem_object_reference(obj);
-
-       mutex_lock(&obj->dev->struct_mutex);
-       drm_vm_open_locked(obj->dev, vma);
-       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
-       struct drm_device *dev = obj->dev;
 
-       mutex_lock(&dev->struct_mutex);
-       drm_vm_close_locked(obj->dev, vma);
-       drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
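With drm_vm_open_locked()/drm_vm_close_locked() gone, VMA open and close reduce to pure reference counting, and the unlocked unreference does its own locking only on the final put. Roughly what the 4.4-era inline helper in include/drm/drm_gem.h boils down to (a sketch, not the verbatim source):

        static inline void
        drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
        {
                if (obj)
                        /* Acquires dev->struct_mutex only when dropping the
                         * last reference, then calls drm_gem_object_free()
                         * with it held, as that function's WARN_ON expects. */
                        kref_put_mutex(&obj->refcount, drm_gem_object_free,
                                       &obj->dev->struct_mutex);
        }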
 
@@ -818,8 +811,6 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
  * callers must verify access restrictions before calling this helper.
  *
- * NOTE: This function has to be protected with dev->struct_mutex
- *
  * Return 0 on success, or -EINVAL if the object size is smaller than the
  * VMA size or if no gem_vm_ops are provided.
  */
@@ -828,8 +819,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 {
        struct drm_device *dev = obj->dev;
 
-       lockdep_assert_held(&dev->struct_mutex);
-
        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;
@@ -850,7 +839,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
         */
        drm_gem_object_reference(obj);
 
-       drm_vm_open_locked(dev, vma);
        return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
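The reference taken just above is the counterpart of the drm_gem_object_unreference_unlocked() call in drm_gem_vm_close(): it is dropped when the VMA goes away, and forked VMAs take their own reference via drm_gem_vm_open(). A hypothetical driver wires this up through its vm_operations_struct, much as the CMA helper does; example_gem_vm_ops below is illustrative and not part of this patch:

        /* Forked VMAs take an extra GEM reference in .open, and
         * .close drops its reference without holding struct_mutex. */
        static const struct vm_operations_struct example_gem_vm_ops = {
                .open  = drm_gem_vm_open,
                .close = drm_gem_vm_close,
        };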
@@ -874,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
-       struct drm_gem_object *obj;
+       struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;
 
        if (drm_device_is_unplugged(dev))
                return -ENODEV;
 
-       mutex_lock(&dev->struct_mutex);
+       drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+       node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+                                                 vma->vm_pgoff,
+                                                 vma_pages(vma));
+       if (likely(node)) {
+               obj = container_of(node, struct drm_gem_object, vma_node);
+               /*
+                * When the object is being freed, after it hits 0-refcnt it
+                * proceeds to tear down the object. In the process it will
+                * attempt to remove the VMA offset and so acquire this
+                * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
+                * that matches our range, we know it is in the process of being
+                * destroyed and will be freed as soon as we release the lock -
+                * so we have to check for the 0-refcnted object and treat it as
+                * invalid.
+                */
+               if (!kref_get_unless_zero(&obj->refcount))
+                       obj = NULL;
+       }
+       drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
 
-       node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
-                                          vma->vm_pgoff,
-                                          vma_pages(vma));
-       if (!node) {
-               mutex_unlock(&dev->struct_mutex);
+       if (!obj)
                return -EINVAL;
-       } else if (!drm_vma_node_is_allowed(node, filp)) {
-               mutex_unlock(&dev->struct_mutex);
+
+       if (!drm_vma_node_is_allowed(node, filp)) {
+               drm_gem_object_unreference_unlocked(obj);
                return -EACCES;
        }
 
-       obj = container_of(node, struct drm_gem_object, vma_node);
-       ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
+       ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
+                              vma);
 
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference_unlocked(obj);
 
        return ret;
 }
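The whole rework hinges on kref_get_unless_zero(): it takes a reference only if the count is still non-zero, which is exactly the guarantee the comment in the lookup path relies on to reject objects already being torn down under mgr->vm_lock. As defined in the pre-refcount_t include/linux/kref.h:

        /* Returns non-zero (and holds a new reference) only if the
         * refcount was not already zero; a dying object is left alone. */
        static inline int __must_check kref_get_unless_zero(struct kref *kref)
        {
                return atomic_add_unless(&kref->refcount, 1, 0);
        }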