X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?p=kvmfornfv.git;a=blobdiff_plain;f=kernel%2Fdrivers%2Fgpu%2Fdrm%2Fnouveau%2Fnvkm%2Fsubdev%2Fmmu%2Fnv41.c;h=c6a26f907009cc746e8276c75272b1e74f6a9fb7;hp=61ee3ab11660a6ab72e3b0aeed1ac7b92cd4a0e3;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hpb=f93b97fd65072de626c074dbe099a1fff05ce060

diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 61ee3ab11..c6a26f907 100644
--- a/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
@@ -36,45 +35,50 @@
  ******************************************************************************/
 
 static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV41_GART_PAGE;
 		u64 phys = (u64)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, (phys >> 7) | 1);
+			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
 			phys += NV41_GART_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv04_mmu_priv *priv = (void *)vm->mmu;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nv_wr32(priv, 0x100810, 0x00000022);
-	if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
-		nv_warn(priv, "flush timeout, 0x%08x\n",
-			nv_rd32(priv, 0x100810));
-	}
-	nv_wr32(priv, 0x100810, 0x00000000);
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+	struct nvkm_device *device = mmu->base.subdev.device;
+
+	mutex_lock(&mmu->base.subdev.mutex);
+	nvkm_wr32(device, 0x100810, 0x00000022);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100810) & 0x00000020)
+			break;
+	);
+	nvkm_wr32(device, 0x100810, 0x00000000);
+	mutex_unlock(&mmu->base.subdev.mutex);
 }
 
 /*******************************************************************************
@@ -82,76 +86,56 @@ nv41_vm_flush(struct nvkm_vm *vm)
  ******************************************************************************/
 
 static int
-nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv41_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_mmu_priv *priv;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
-	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
-		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
-					data, size, pobject);
-	}
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-			      "pciegart", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV41_GART_SIZE;
-	priv->base.dma_bits = 39;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv41_vm_map_sg;
-	priv->base.unmap = nv41_vm_unmap;
-	priv->base.flush = nv41_vm_flush;
-
-	ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
-			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	priv->vm->pgt[0].refcount[0] = 1;
+	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	return 0;
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+			      &mmu->vm->pgt[0].mem[0]);
+	mmu->vm->pgt[0].refcount[0] = 1;
+	return ret;
 }
 
-static int
-nv41_mmu_init(struct nvkm_object *object)
+static void
+nv41_mmu_init(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0];
-	int ret;
-
-	ret = nvkm_mmu_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
-	nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100820, 0x00000000);
-	return 0;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
+	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100820, 0x00000000);
 }
 
-struct nvkm_oclass
-nv41_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x41),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv41_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = nv41_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv41_mmu = {
+	.dtor = nv04_mmu_dtor,
+	.oneinit = nv41_mmu_oneinit,
+	.init = nv41_mmu_init,
+	.limit = NV41_GART_SIZE,
+	.dma_bits = 39,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv41_vm_map_sg,
+	.unmap = nv41_vm_unmap,
+	.flush = nv41_vm_flush,
 };
+
+int
+nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->type == NVKM_DEVICE_AGP ||
+	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+		return nv04_mmu_new(device, index, pmmu);
+
+	return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+}