These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / gpu / drm / nouveau / nvkm / subdev / instmem / gk20a.c
index dd0994d..14107b5 100644
 /*
  * GK20A does not have dedicated video memory, and to accurately represent this
  * fact Nouveau will not create a RAM device for it. Therefore its instmem
- * implementation must be done directly on top of system memory, while providing
- * coherent read and write operations.
+ * implementation must be done directly on top of system memory, while
+ * preserving coherency for read and write operations.
  *
  * Instmem can be allocated through two means:
- * 1) If an IOMMU mapping has been probed, the IOMMU API is used to make memory
+ * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
  *    pages contiguous to the GPU. This is the preferred way.
- * 2) If no IOMMU mapping is probed, the DMA API is used to allocate physically
+ * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
  *    contiguous memory.
  *
- * In both cases CPU read and writes are performed using PRAMIN (i.e. using the
- * GPU path) to ensure these operations are coherent for the GPU. This allows us
- * to use more "relaxed" allocation parameters when using the DMA API, since we
- * never need a kernel mapping.
+ * In both cases CPU reads and writes are performed by creating a write-combined
+ * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
+ * be conservative we do this every time we acquire or release an instobj, but
+ * ideally L2 management should be handled at a higher level.
+ *
+ * To improve performance, CPU mappings are not removed upon instobj release.
+ * Instead they are placed into an LRU list to be recycled when the mapped space
+ * goes beyond a certain threshold. At the moment this limit is 1MB.
  */
+#include "priv.h"
 
-#include <subdev/fb.h>
+#include <core/memory.h>
 #include <core/mm.h>
-#include <core/device.h>
-
-#ifdef __KERNEL__
-#include <linux/dma-attrs.h>
-#include <linux/iommu.h>
-#include <nouveau_platform.h>
-#endif
+#include <core/tegra.h>
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
 
-#include "priv.h"
+struct gk20a_instobj {
+       struct nvkm_memory memory;
+       struct nvkm_mem mem;
+       struct gk20a_instmem *imem;
 
-struct gk20a_instobj_priv {
-       struct nvkm_instobj base;
-       /* Must be second member here - see nouveau_gpuobj_map_vm() */
-       struct nvkm_mem *mem;
-       /* Pointed by mem */
-       struct nvkm_mem _mem;
+       /* CPU mapping */
+       u32 *vaddr;
+       struct list_head vaddr_node;
 };
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
 
 /*
  * Used for objects allocated using the DMA API
  */
 struct gk20a_instobj_dma {
-       struct gk20a_instobj_priv base;
+       struct gk20a_instobj base;
 
-       void *cpuaddr;
+       u32 *cpuaddr;
        dma_addr_t handle;
        struct nvkm_mm_node r;
 };
+#define gk20a_instobj_dma(p) \
+       container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)
 
 /*
  * Used for objects flattened using the IOMMU API
  */
 struct gk20a_instobj_iommu {
-       struct gk20a_instobj_priv base;
+       struct gk20a_instobj base;
 
-       /* array of base.mem->size pages */
+       /* will point to the higher half of pages */
+       dma_addr_t *dma_addrs;
+       /* array of base.mem->size pages (+ dma_addr_ts) */
        struct page *pages[];
 };
+#define gk20a_instobj_iommu(p) \
+       container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)
 
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
        struct nvkm_instmem base;
+
+       /* protects vaddr_* and gk20a_instobj::vaddr* */
        spinlock_t lock;
-       u64 addr;
+
+       /* CPU mappings LRU */
+       unsigned int vaddr_use;
+       unsigned int vaddr_max;
+       struct list_head vaddr_lru;
 
        /* Only used if IOMMU is present */
        struct mutex *mm_mutex;
        struct nvkm_mm *mm;
        struct iommu_domain *domain;
        unsigned long iommu_pgshift;
+       u16 iommu_bit;
 
        /* Only used by DMA API */
        struct dma_attrs attrs;
+
+       void __iomem * (*cpu_map)(struct nvkm_memory *);
 };
+#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
+
+static enum nvkm_memory_target
+gk20a_instobj_target(struct nvkm_memory *memory)
+{
+       return NVKM_MEM_TARGET_HOST;
+}
+
+static u64
+gk20a_instobj_addr(struct nvkm_memory *memory)
+{
+       return gk20a_instobj(memory)->mem.offset;
+}
+
+static u64
+gk20a_instobj_size(struct nvkm_memory *memory)
+{
+       return (u64)gk20a_instobj(memory)->mem.size << 12;
+}
+
+static void __iomem *
+gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+       struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
+       struct device *dev = node->base.imem->base.subdev.device->dev;
+       int npages = nvkm_memory_size(memory) >> 12;
+       struct page *pages[npages];
+       int i;
+
+       /* we shouldn't see a gk20a on anything but arm/arm64 anyways */
+       /* phys_to_page does not exist on all platforms... */
+       pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
+       for (i = 1; i < npages; i++)
+               pages[i] = pages[0] + i;
+
+       return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+#else
+       BUG();
+       return NULL;
+#endif
+}
+
+static void __iomem *
+gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
+{
+       struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
+       int npages = nvkm_memory_size(memory) >> 12;
+
+       return vmap(node->pages, npages, VM_MAP,
+                   pgprot_writecombine(PAGE_KERNEL));
+}
 
 /*
- * Use PRAMIN to read/write data and avoid coherency issues.
- * PRAMIN uses the GPU path and ensures data will always be coherent.
- *
- * A dynamic mapping based solution would be desirable in the future, but
- * the issue remains of how to maintain coherency efficiently. On ARM it is
- * not easy (if possible at all?) to create uncached temporary mappings.
+ * Must be called while holding gk20a_instmem::lock
  */
+static void
+gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
+{
+       while (imem->vaddr_use + size > imem->vaddr_max) {
+               struct gk20a_instobj *obj;
+
+               /* no candidate that can be unmapped, abort... */
+               if (list_empty(&imem->vaddr_lru))
+                       break;
+
+               obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj,
+                                      vaddr_node);
+               list_del(&obj->vaddr_node);
+               vunmap(obj->vaddr);
+               obj->vaddr = NULL;
+               imem->vaddr_use -= nvkm_memory_size(&obj->memory);
+               nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
+                          imem->vaddr_use, imem->vaddr_max);
 
-static u32
-gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+       }
+}
+
+static void __iomem *
+gk20a_instobj_acquire(struct nvkm_memory *memory)
 {
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-       struct gk20a_instobj_priv *node = (void *)object;
+       struct gk20a_instobj *node = gk20a_instobj(memory);
+       struct gk20a_instmem *imem = node->imem;
+       struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
+       const u64 size = nvkm_memory_size(memory);
        unsigned long flags;
-       u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-       u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-       u32 data;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       if (unlikely(priv->addr != base)) {
-               nv_wr32(priv, 0x001700, base >> 16);
-               priv->addr = base;
+
+       nvkm_ltc_flush(ltc);
+
+       spin_lock_irqsave(&imem->lock, flags);
+
+       if (node->vaddr) {
+               /* remove us from the LRU list since we cannot be unmapped */
+               list_del(&node->vaddr_node);
+
+               goto out;
+       }
+
+       /* try to free some address space if we reached the limit */
+       gk20a_instmem_vaddr_gc(imem, size);
+
+       node->vaddr = imem->cpu_map(memory);
+
+       if (!node->vaddr) {
+               nvkm_error(&imem->base.subdev, "cannot map instobj - "
+                          "this is not going to end well...\n");
+               goto out;
        }
-       data = nv_rd32(priv, 0x700000 + addr);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return data;
+
+       imem->vaddr_use += size;
+       nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
+                  imem->vaddr_use, imem->vaddr_max);
+
+out:
+       spin_unlock_irqrestore(&imem->lock, flags);
+
+       return node->vaddr;
 }
 
 static void
-gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+gk20a_instobj_release(struct nvkm_memory *memory)
 {
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-       struct gk20a_instobj_priv *node = (void *)object;
+       struct gk20a_instobj *node = gk20a_instobj(memory);
+       struct gk20a_instmem *imem = node->imem;
+       struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
        unsigned long flags;
-       u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-       u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
 
-       spin_lock_irqsave(&priv->lock, flags);
-       if (unlikely(priv->addr != base)) {
-               nv_wr32(priv, 0x001700, base >> 16);
-               priv->addr = base;
-       }
-       nv_wr32(priv, 0x700000 + addr, data);
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_lock_irqsave(&imem->lock, flags);
+
+       /* add ourselves to the LRU list so our CPU mapping can be freed */
+       list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
+
+       spin_unlock_irqrestore(&imem->lock, flags);
+
+       wmb();
+       nvkm_ltc_invalidate(ltc);
+}
+
+static u32
+gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+       struct gk20a_instobj *node = gk20a_instobj(memory);
+
+       return node->vaddr[offset / 4];
 }
 
 static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-       struct gk20a_instobj_dma *node = (void *)_node;
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
-       struct device *dev = nv_device_base(nv_device(priv));
+       struct gk20a_instobj *node = gk20a_instobj(memory);
 
-       if (unlikely(!node->cpuaddr))
-               return;
+       node->vaddr[offset / 4] = data;
+}
 
-       dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
-                      node->handle, &priv->attrs);
+static void
+gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+       struct gk20a_instobj *node = gk20a_instobj(memory);
+
+       nvkm_vm_map_at(vma, offset, &node->mem);
 }
 
+/*
+ * Clear the CPU mapping of an instobj if it exists
+ */
 static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor(struct gk20a_instobj *node)
+{
+       struct gk20a_instmem *imem = node->imem;
+       struct gk20a_instobj *obj;
+       unsigned long flags;
+
+       spin_lock_irqsave(&imem->lock, flags);
+
+       if (!node->vaddr)
+               goto out;
+
+       list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
+               if (obj == node) {
+                       list_del(&obj->vaddr_node);
+                       break;
+               }
+       }
+       vunmap(node->vaddr);
+       node->vaddr = NULL;
+       imem->vaddr_use -= nvkm_memory_size(&node->memory);
+       nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
+                  imem->vaddr_use, imem->vaddr_max);
+
+out:
+       spin_unlock_irqrestore(&imem->lock, flags);
+}
+
+static void *
+gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
+{
+       struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
+       struct gk20a_instmem *imem = node->base.imem;
+       struct device *dev = imem->base.subdev.device->dev;
+
+       gk20a_instobj_dtor(&node->base);
+
+       if (unlikely(!node->cpuaddr))
+               goto out;
+
+       dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
+                      node->handle, &imem->attrs);
+
+out:
+       return node;
+}
+
+static void *
+gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 {
-       struct gk20a_instobj_iommu *node = (void *)_node;
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+       struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
+       struct gk20a_instmem *imem = node->base.imem;
+       struct device *dev = imem->base.subdev.device->dev;
        struct nvkm_mm_node *r;
        int i;
 
-       if (unlikely(list_empty(&_node->mem->regions)))
-               return;
+       gk20a_instobj_dtor(&node->base);
 
-       r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+       if (unlikely(list_empty(&node->base.mem.regions)))
+               goto out;
+
+       r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
                             rl_entry);
 
-       /* clear bit 34 to unmap pages */
-       r->offset &= ~BIT(34 - priv->iommu_pgshift);
+       /* clear IOMMU bit to unmap pages */
+       r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 
        /* Unmap pages from GPU address space and free them */
-       for (i = 0; i < _node->mem->size; i++) {
-               iommu_unmap(priv->domain,
-                           (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+       for (i = 0; i < node->base.mem.size; i++) {
+               iommu_unmap(imem->domain,
+                           (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
+               dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
                __free_page(node->pages[i]);
        }
 
        /* Release area from GPU address space */
-       mutex_lock(priv->mm_mutex);
-       nvkm_mm_free(priv->mm, &r);
-       mutex_unlock(priv->mm_mutex);
-}
+       mutex_lock(imem->mm_mutex);
+       nvkm_mm_free(imem->mm, &r);
+       mutex_unlock(imem->mm_mutex);
 
-static void
-gk20a_instobj_dtor(struct nvkm_object *object)
-{
-       struct gk20a_instobj_priv *node = (void *)object;
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+out:
+       return node;
+}
 
-       if (priv->domain)
-               gk20a_instobj_dtor_iommu(node);
-       else
-               gk20a_instobj_dtor_dma(node);
+static const struct nvkm_memory_func
+gk20a_instobj_func_dma = {
+       .dtor = gk20a_instobj_dtor_dma,
+       .target = gk20a_instobj_target,
+       .addr = gk20a_instobj_addr,
+       .size = gk20a_instobj_size,
+       .acquire = gk20a_instobj_acquire,
+       .release = gk20a_instobj_release,
+       .rd32 = gk20a_instobj_rd32,
+       .wr32 = gk20a_instobj_wr32,
+       .map = gk20a_instobj_map,
+};
 
-       nvkm_instobj_destroy(&node->base);
-}
+static const struct nvkm_memory_func
+gk20a_instobj_func_iommu = {
+       .dtor = gk20a_instobj_dtor_iommu,
+       .target = gk20a_instobj_target,
+       .addr = gk20a_instobj_addr,
+       .size = gk20a_instobj_size,
+       .acquire = gk20a_instobj_acquire,
+       .release = gk20a_instobj_release,
+       .rd32 = gk20a_instobj_rd32,
+       .wr32 = gk20a_instobj_wr32,
+       .map = gk20a_instobj_map,
+};
 
 static int
-gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-                      struct nvkm_oclass *oclass, u32 npages, u32 align,
-                      struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
+                      struct gk20a_instobj **_node)
 {
        struct gk20a_instobj_dma *node;
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-       struct device *dev = nv_device_base(nv_device(parent));
-       int ret;
+       struct nvkm_subdev *subdev = &imem->base.subdev;
+       struct device *dev = subdev->device->dev;
 
-       ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
-                                  (void **)&node);
+       if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+               return -ENOMEM;
        *_node = &node->base;
-       if (ret)
-               return ret;
+
+       nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
 
        node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
                                        &node->handle, GFP_KERNEL,
-                                       &priv->attrs);
+                                       &imem->attrs);
        if (!node->cpuaddr) {
-               nv_error(priv, "cannot allocate DMA memory\n");
+               nvkm_error(subdev, "cannot allocate DMA memory\n");
                return -ENOMEM;
        }
 
        /* alignment check */
        if (unlikely(node->handle & (align - 1)))
-               nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
-                       &node->handle, align);
+               nvkm_warn(subdev,
+                         "memory not aligned as requested: %pad (0x%x)\n",
+                         &node->handle, align);
 
        /* present memory for being mapped using small pages */
        node->r.type = 12;
        node->r.offset = node->handle >> 12;
        node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-       node->base._mem.offset = node->handle;
+       node->base.mem.offset = node->handle;
 
-       INIT_LIST_HEAD(&node->base._mem.regions);
-       list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+       INIT_LIST_HEAD(&node->base.mem.regions);
+       list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
 
        return 0;
 }
 
 static int
-gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
-                        struct nvkm_oclass *oclass, u32 npages, u32 align,
-                        struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
+                        struct gk20a_instobj **_node)
 {
        struct gk20a_instobj_iommu *node;
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+       struct nvkm_subdev *subdev = &imem->base.subdev;
+       struct device *dev = subdev->device->dev;
        struct nvkm_mm_node *r;
        int ret;
        int i;
 
-       ret = nvkm_instobj_create_(parent, engine, oclass,
-                               sizeof(*node) + sizeof(node->pages[0]) * npages,
-                               (void **)&node);
+       /*
+        * despite their variable size, instmem allocations are small enough
+        * (< 1 page) to be handled by kzalloc
+        */
+       if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
+                            sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
+               return -ENOMEM;
        *_node = &node->base;
-       if (ret)
-               return ret;
+       node->dma_addrs = (void *)(node->pages + npages);
+
+       nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
 
        /* Allocate backing memory */
        for (i = 0; i < npages; i++) {
                struct page *p = alloc_page(GFP_KERNEL);
+               dma_addr_t dma_adr;
 
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto free_pages;
                }
                node->pages[i] = p;
+               dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(dev, dma_adr)) {
+                       nvkm_error(subdev, "DMA mapping error!\n");
+                       ret = -ENOMEM;
+                       goto free_pages;
+               }
+               node->dma_addrs[i] = dma_adr;
        }
 
-       mutex_lock(priv->mm_mutex);
+       mutex_lock(imem->mm_mutex);
        /* Reserve area from GPU address space */
-       ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
-                          align >> priv->iommu_pgshift, &r);
-       mutex_unlock(priv->mm_mutex);
+       ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+                          align >> imem->iommu_pgshift, &r);
+       mutex_unlock(imem->mm_mutex);
        if (ret) {
-               nv_error(priv, "virtual space is full!\n");
+               nvkm_error(subdev, "IOMMU space is full!\n");
                goto free_pages;
        }
 
        /* Map into GPU address space */
        for (i = 0; i < npages; i++) {
-               struct page *p = node->pages[i];
-               u32 offset = (r->offset + i) << priv->iommu_pgshift;
+               u32 offset = (r->offset + i) << imem->iommu_pgshift;
 
-               ret = iommu_map(priv->domain, offset, page_to_phys(p),
+               ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
                                PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
                if (ret < 0) {
-                       nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+                       nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
 
                        while (i-- > 0) {
                                offset -= PAGE_SIZE;
-                               iommu_unmap(priv->domain, offset, PAGE_SIZE);
+                               iommu_unmap(imem->domain, offset, PAGE_SIZE);
                        }
                        goto release_area;
                }
        }
 
-       /* Bit 34 tells that an address is to be resolved through the IOMMU */
-       r->offset |= BIT(34 - priv->iommu_pgshift);
+       /* IOMMU bit tells that an address is to be resolved through the IOMMU */
+       r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
-       node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+       node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
 
-       INIT_LIST_HEAD(&node->base._mem.regions);
-       list_add_tail(&r->rl_entry, &node->base._mem.regions);
+       INIT_LIST_HEAD(&node->base.mem.regions);
+       list_add_tail(&r->rl_entry, &node->base.mem.regions);
 
        return 0;
 
 release_area:
-       mutex_lock(priv->mm_mutex);
-       nvkm_mm_free(priv->mm, &r);
-       mutex_unlock(priv->mm_mutex);
+       mutex_lock(imem->mm_mutex);
+       nvkm_mm_free(imem->mm, &r);
+       mutex_unlock(imem->mm_mutex);
 
 free_pages:
-       for (i = 0; i < npages && node->pages[i] != NULL; i++)
+       for (i = 0; i < npages && node->pages[i] != NULL; i++) {
+               dma_addr_t dma_addr = node->dma_addrs[i];
+               if (dma_addr)
+                       dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+                                      DMA_BIDIRECTIONAL);
                __free_page(node->pages[i]);
+       }
 
        return ret;
 }
 
 static int
-gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-                  struct nvkm_oclass *oclass, void *data, u32 _size,
-                  struct nvkm_object **pobject)
+gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+                 struct nvkm_memory **pmemory)
 {
-       struct nvkm_instobj_args *args = data;
-       struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-       struct gk20a_instobj_priv *node;
-       u32 size, align;
+       struct gk20a_instmem *imem = gk20a_instmem(base);
+       struct nvkm_subdev *subdev = &imem->base.subdev;
+       struct gk20a_instobj *node = NULL;
        int ret;
 
-       nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
-                priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+       nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
+                  imem->domain ? "IOMMU" : "DMA", size, align);
 
        /* Round size and align to page bounds */
-       size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
-       align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+       size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
+       align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
 
-       if (priv->domain)
-               ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
-                                             size >> PAGE_SHIFT, align, &node);
+       if (imem->domain)
+               ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
+                                              align, &node);
        else
-               ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
-                                            size >> PAGE_SHIFT, align, &node);
-       *pobject = nv_object(node);
+               ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
+                                            align, &node);
+       *pmemory = node ? &node->memory : NULL;
        if (ret)
                return ret;
 
-       node->mem = &node->_mem;
+       node->imem = imem;
 
        /* present memory for being mapped using small pages */
-       node->mem->size = size >> 12;
-       node->mem->memtype = 0;
-       node->mem->page_shift = 12;
-
-       node->base.addr = node->mem->offset;
-       node->base.size = size;
+       node->mem.size = size >> 12;
+       node->mem.memtype = 0;
+       node->mem.page_shift = 12;
 
-       nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-                size, align, node->mem->offset);
+       nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+                  size, align, node->mem.offset);
 
        return 0;
 }
 
-static struct nvkm_instobj_impl
-gk20a_instobj_oclass = {
-       .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = gk20a_instobj_ctor,
-               .dtor = gk20a_instobj_dtor,
-               .init = _nvkm_instobj_init,
-               .fini = _nvkm_instobj_fini,
-               .rd32 = gk20a_instobj_rd32,
-               .wr32 = gk20a_instobj_wr32,
-       },
-};
+static void *
+gk20a_instmem_dtor(struct nvkm_instmem *base)
+{
+       struct gk20a_instmem *imem = gk20a_instmem(base);
 
+       /* perform some sanity checks... */
+       if (!list_empty(&imem->vaddr_lru))
+               nvkm_warn(&base->subdev, "instobj LRU not empty!\n");
 
+       if (imem->vaddr_use != 0)
+               nvkm_warn(&base->subdev, "instobj vmap area not empty! "
+                         "0x%x bytes still mapped\n", imem->vaddr_use);
 
-static int
-gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
-{
-       struct gk20a_instmem_priv *priv = (void *)object;
-       priv->addr = ~0ULL;
-       return nvkm_instmem_fini(&priv->base, suspend);
+       return imem;
 }
 
-static int
-gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-                  struct nvkm_oclass *oclass, void *data, u32 size,
-                  struct nvkm_object **pobject)
-{
-       struct gk20a_instmem_priv *priv;
-       struct nouveau_platform_device *plat;
-       int ret;
-
-       ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       spin_lock_init(&priv->lock);
+static const struct nvkm_instmem_func
+gk20a_instmem = {
+       .dtor = gk20a_instmem_dtor,
+       .memory_new = gk20a_instobj_new,
+       .persistent = true,
+       .zero = false,
+};
 
-       plat = nv_device_to_platform(nv_device(parent));
-       if (plat->gpu->iommu.domain) {
-               priv->domain = plat->gpu->iommu.domain;
-               priv->mm = plat->gpu->iommu.mm;
-               priv->iommu_pgshift = plat->gpu->iommu.pgshift;
-               priv->mm_mutex = &plat->gpu->iommu.mutex;
+int
+gk20a_instmem_new(struct nvkm_device *device, int index,
+                 struct nvkm_instmem **pimem)
+{
+       struct nvkm_device_tegra *tdev = device->func->tegra(device);
+       struct gk20a_instmem *imem;
 
-               nv_info(priv, "using IOMMU\n");
+       if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+               return -ENOMEM;
+       nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+       spin_lock_init(&imem->lock);
+       *pimem = &imem->base;
+
+       /* do not allow more than 1MB of CPU-mapped instmem */
+       imem->vaddr_use = 0;
+       imem->vaddr_max = 0x100000;
+       INIT_LIST_HEAD(&imem->vaddr_lru);
+
+       if (tdev->iommu.domain) {
+               imem->mm_mutex = &tdev->iommu.mutex;
+               imem->mm = &tdev->iommu.mm;
+               imem->domain = tdev->iommu.domain;
+               imem->iommu_pgshift = tdev->iommu.pgshift;
+               imem->cpu_map = gk20a_instobj_cpu_map_iommu;
+               imem->iommu_bit = tdev->func->iommu_bit;
+
+               nvkm_info(&imem->base.subdev, "using IOMMU\n");
        } else {
-               init_dma_attrs(&priv->attrs);
-               /*
-                * We will access instmem through PRAMIN and thus do not need a
-                * consistent CPU pointer or kernel mapping
-                */
-               dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
-               dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
-               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
-               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
-
-               nv_info(priv, "using DMA API\n");
+               init_dma_attrs(&imem->attrs);
+               /* We will access the memory through our own mapping */
+               dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+               dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
+               imem->cpu_map = gk20a_instobj_cpu_map_dma;
+
+               nvkm_info(&imem->base.subdev, "using DMA API\n");
        }
 
        return 0;
 }
-
-struct nvkm_oclass *
-gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
-       .base.handle = NV_SUBDEV(INSTMEM, 0xea),
-       .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = gk20a_instmem_ctor,
-               .dtor = _nvkm_instmem_dtor,
-               .init = _nvkm_instmem_init,
-               .fini = gk20a_instmem_fini,
-       },
-       .instobj = &gk20a_instobj_oclass.base,
-}.base;
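
The reworked driver's central idea, per the header comment in the patch above, is that CPU access now goes through recyclable write-combined mappings: acquire reuses or creates a mapping, release parks it on an LRU list instead of unmapping it, and a garbage collector evicts the oldest unused mappings once roughly 1 MiB is mapped. The standalone, user-space C sketch below models only that bookkeeping; every name in it (wc_cache, wc_obj, wc_acquire, wc_release) is hypothetical, malloc()/free() stand in for vmap()/vunmap(), and the real driver's locking and L2 cache flush/invalidate steps are deliberately omitted.

/*
 * User-space model of the vaddr LRU recycling scheme described in the
 * patch's header comment (gk20a_instmem_vaddr_gc / acquire / release).
 * All names are illustrative; malloc()/free() replace vmap()/vunmap().
 */
#include <stdio.h>
#include <stdlib.h>

struct wc_obj {
	size_t size;                /* size of the (pretend) WC mapping */
	void *vaddr;                /* NULL once the mapping is recycled */
	struct wc_obj *prev, *next; /* LRU linkage, valid while unused */
};

struct wc_cache {
	size_t use, max;            /* bytes currently mapped / budget */
	struct wc_obj lru;          /* sentinel: next = oldest entry */
};

static void wc_cache_init(struct wc_cache *c, size_t max)
{
	c->use = 0;
	c->max = max;
	c->lru.prev = c->lru.next = &c->lru;
}

/* Drop the oldest unused mappings until @size more bytes fit the budget. */
static void wc_cache_gc(struct wc_cache *c, size_t size)
{
	while (c->use + size > c->max && c->lru.next != &c->lru) {
		struct wc_obj *victim = c->lru.next;

		victim->prev->next = victim->next;
		victim->next->prev = victim->prev;
		free(victim->vaddr);          /* stands in for vunmap() */
		victim->vaddr = NULL;
		c->use -= victim->size;
	}
}

/* Acquire: reuse an existing mapping or create one, evicting if needed. */
static void *wc_acquire(struct wc_cache *c, struct wc_obj *obj)
{
	if (obj->vaddr) {
		/* remove from LRU: an in-use mapping must not be recycled */
		obj->prev->next = obj->next;
		obj->next->prev = obj->prev;
		return obj->vaddr;
	}
	wc_cache_gc(c, obj->size);
	obj->vaddr = malloc(obj->size);       /* stands in for vmap() */
	c->use += obj->size;
	return obj->vaddr;
}

/* Release: keep the mapping but make it eligible for recycling. */
static void wc_release(struct wc_cache *c, struct wc_obj *obj)
{
	obj->prev = c->lru.prev;
	obj->next = &c->lru;
	obj->prev->next = obj;
	obj->next->prev = obj;
}

int main(void)
{
	struct wc_cache cache;
	struct wc_obj a = { .size = 768 << 10 }, b = { .size = 512 << 10 };

	wc_cache_init(&cache, 1 << 20);       /* 1 MiB budget, as in the patch */
	wc_acquire(&cache, &a);
	wc_release(&cache, &a);               /* a is now recyclable */
	wc_acquire(&cache, &b);               /* evicts a to stay under 1 MiB */
	printf("a mapped: %s, bytes in use: %zu\n",
	       a.vaddr ? "yes" : "no", cache.use);
	wc_release(&cache, &b);
	return 0;
}

Keeping released mappings cached this way trades a bounded amount of kernel virtual address space (1 MiB in the patch) against the cost of repeated vmap()/vunmap() calls on frequently accessed instobjs.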