X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?p=kvmfornfv.git;a=blobdiff_plain;f=kernel%2Fdrivers%2Fgpu%2Fdrm%2Fnouveau%2Fnouveau_chan.c;h=1860f389f21f157fa7b1795e1a5b7a5ccab64098;hp=0589babc506eb3ca186b31e8058dc7e86fc04f9f;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hpb=f93b97fd65072de626c074dbe099a1fff05ce060

diff --git a/kernel/drivers/gpu/drm/nouveau/nouveau_chan.c b/kernel/drivers/gpu/drm/nouveau/nouveau_chan.c
index 0589babc5..1860f389f 100644
--- a/kernel/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/kernel/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -24,6 +24,7 @@
 #include <nvif/os.h>
 #include <nvif/class.h>
+#include <nvif/ioctl.h>
 
 /*XXX*/
 #include <core/client.h>
 
@@ -42,20 +43,24 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
 int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
-	struct nouveau_fence *fence = NULL;
-	int ret;
+	if (likely(chan && chan->fence)) {
+		struct nouveau_cli *cli = (void *)chan->user.client;
+		struct nouveau_fence *fence = NULL;
+		int ret;
+
+		ret = nouveau_fence_new(chan, false, &fence);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
 
-	ret = nouveau_fence_new(chan, false, &fence);
-	if (!ret) {
-		ret = nouveau_fence_wait(fence, false, false);
-		nouveau_fence_unref(&fence);
+		if (ret) {
+			NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
+				  chan->chid, nvxx_client(&cli->base)->name);
+			return ret;
+		}
 	}
-
-	if (ret)
-		NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
-			  chan->object->handle, nvxx_client(&cli->base)->name);
-	return ret;
+	return 0;
 }
 
 void
@@ -63,21 +68,18 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 {
 	struct nouveau_channel *chan = *pchan;
 	if (chan) {
-		if (chan->fence) {
-			nouveau_channel_idle(chan);
+		if (chan->fence)
 			nouveau_fence(chan->drm)->context_del(chan);
-		}
 		nvif_object_fini(&chan->nvsw);
 		nvif_object_fini(&chan->gart);
 		nvif_object_fini(&chan->vram);
-		nvif_object_ref(NULL, &chan->object);
+		nvif_object_fini(&chan->user);
 		nvif_object_fini(&chan->push.ctxdma);
 		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
 		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
 			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
-		nvif_device_ref(NULL, &chan->device);
 		kfree(chan);
 	}
 	*pchan = NULL;
@@ -85,9 +87,9 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 
 static int
 nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
-		     u32 handle, u32 size, struct nouveau_channel **pchan)
+		     u32 size, struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
@@ -98,7 +100,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	if (!chan)
 		return -ENOMEM;
 
-	nvif_device_ref(device, &chan->device);
+	chan->device = device;
 	chan->drm = drm;
 
 	/* allocate memory for dma push buffer */
@@ -146,7 +148,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			 */
 			args.target = NV_DMA_V0_TARGET_PCI;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
-			args.start = nv_device_resource_start(nvxx_device(device), 1);
+			args.start = nvxx_device(device)->func->
+				resource_addr(nvxx_device(device), 1);
 			args.limit = args.start + device->info.ram_user - 1;
 		} else {
 			args.target = NV_DMA_V0_TARGET_VRAM;
@@ -155,7 +158,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			args.limit = device->info.ram_user - 1;
 		}
 	} else {
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -169,8 +172,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		}
 	}
 
-	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
-			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
+	ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
 			       &args, sizeof(args), &chan->push.ctxdma);
 	if (ret) {
 		nouveau_channel_del(pchan);
@@ -182,7 +184,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 
 static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
-		    u32 handle, u32 engine, struct nouveau_channel **pchan)
+		    u32 engine, struct nouveau_channel **pchan)
 {
 	static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
 					KEPLER_CHANNEL_GPFIFO_A,
@@ -193,14 +195,15 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 	const u16 *oclass = oclasses;
 	union {
 		struct nv50_channel_gpfifo_v0 nv50;
+		struct fermi_channel_gpfifo_v0 fermi;
 		struct kepler_channel_gpfifo_a_v0 kepler;
-	} args, *retn;
+	} args;
 	struct nouveau_channel *chan;
 	u32 size;
 	int ret;
 
 	/* allocate dma push buffer */
-	ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan);
+	ret = nouveau_channel_prep(drm, device, 0x12000, &chan);
 	*pchan = chan;
 	if (ret)
 		return ret;
@@ -210,26 +213,36 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
 			args.kepler.version = 0;
 			args.kepler.engine = engine;
-			args.kepler.pushbuf = chan->push.ctxdma.handle;
 			args.kepler.ilength = 0x02000;
 			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
+			args.kepler.vm = 0;
 			size = sizeof(args.kepler);
+		} else
+		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
+			args.fermi.version = 0;
+			args.fermi.ilength = 0x02000;
+			args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
+			args.fermi.vm = 0;
+			size = sizeof(args.fermi);
 		} else {
 			args.nv50.version = 0;
-			args.nv50.pushbuf = chan->push.ctxdma.handle;
 			args.nv50.ilength = 0x02000;
 			args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+			args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
+			args.nv50.vm = 0;
 			size = sizeof(args.nv50);
 		}
 
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, size, &chan->object);
+		ret = nvif_object_init(&device->object, 0, *oclass++,
+				       &args, size, &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
-				chan->chid = retn->kepler.chid;
+			if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+				chan->chid = args.kepler.chid;
+			else
+			if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+				chan->chid = args.fermi.chid;
 			else
-				chan->chid = retn->nv50.chid;
+				chan->chid = args.nv50.chid;
 			return ret;
 		}
 	} while (*oclass);
@@ -240,7 +253,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 
 static int
 nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
-		    u32 handle, struct nouveau_channel **pchan)
+		    struct nouveau_channel **pchan)
 {
 	static const u16 oclasses[] = { NV40_CHANNEL_DMA,
 					NV17_CHANNEL_DMA,
@@ -248,27 +261,26 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
 					NV03_CHANNEL_DMA,
 					0 };
 	const u16 *oclass = oclasses;
-	struct nv03_channel_dma_v0 args, *retn;
+	struct nv03_channel_dma_v0 args;
 	struct nouveau_channel *chan;
 	int ret;
 
 	/* allocate dma push buffer */
-	ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan);
+	ret = nouveau_channel_prep(drm, device, 0x10000, &chan);
 	*pchan = chan;
 	if (ret)
 		return ret;
 
 	/* create channel object */
 	args.version = 0;
-	args.pushbuf = chan->push.ctxdma.handle;
+	args.pushbuf = nvif_handle(&chan->push.ctxdma);
 	args.offset = chan->push.vma.offset;
 
 	do {
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, sizeof(args), &chan->object);
+		ret = nvif_object_init(&device->object, 0, *oclass++,
+				       &args, sizeof(args), &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			chan->chid = retn->chid;
+			chan->chid = args.chid;
 			return ret;
 		}
 	} while (ret && *oclass);
@@ -281,13 +293,12 @@ static int
 nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 {
 	struct nvif_device *device = chan->device;
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
-	struct nvkm_sw_chan *swch;
 	struct nv_dma_v0 args = {};
 	int ret, i;
 
-	nvif_object_map(chan->object);
+	nvif_object_map(&chan->user);
 
 	/* allocate dma objects to cover all allowed vram, and gart */
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -303,9 +314,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.limit = device->info.ram_user - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, vram,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->vram);
+		ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->vram);
 		if (ret)
 			return ret;
 
@@ -315,7 +325,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.start = 0;
 			args.limit = cli->vm->mmu->limit - 1;
 		} else
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -328,15 +338,14 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.limit = mmu->limit - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, gart,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->gart);
+		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->gart);
 		if (ret)
 			return ret;
 	}
 
 	/* initialise dma tracking parameters */
-	switch (chan->object->oclass & 0x00ff) {
+	switch (chan->user.oclass & 0x00ff) {
 	case 0x006b:
 	case 0x006e:
 		chan->user_put = 0x40;
@@ -368,15 +377,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
 	/* allocate software object class (used for fences on <= nv05) */
 	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
-		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
+		ret = nvif_object_init(&chan->user, 0x006e,
+				       NVIF_IOCTL_NEW_V0_SW_NV04,
 				       NULL, 0, &chan->nvsw);
 		if (ret)
 			return ret;
 
-		swch = (void *)nvxx_object(&chan->nvsw)->parent;
-		swch->flip = nouveau_flip_complete;
-		swch->flip_data = chan;
-
 		ret = RING_SPACE(chan, 2);
 		if (ret)
 			return ret;
@@ -392,10 +398,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
 int
 nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
-		    u32 handle, u32 arg0, u32 arg1,
-		    struct nouveau_channel **pchan)
+		    u32 arg0, u32 arg1, struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	bool super;
 	int ret;
 
@@ -403,19 +408,19 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
 	super = cli->base.super;
 	cli->base.super = true;
 
-	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
+	ret = nouveau_channel_ind(drm, device, arg0, pchan);
 	if (ret) {
-		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
-		ret = nouveau_channel_dma(drm, device, handle, pchan);
+		NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
+		ret = nouveau_channel_dma(drm, device, pchan);
 		if (ret) {
-			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
+			NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
 			goto done;
 		}
 	}
 
 	ret = nouveau_channel_init(*pchan, arg0, arg1);
 	if (ret) {
-		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
+		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
 		nouveau_channel_del(pchan);
 	}
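The most visible behavioural change in the diff is in nouveau_channel_idle(): instead of unconditionally emitting a fence and waiting on it, the function now does nothing (and reports success) unless both the channel and its fence context exist, which fits nouveau_channel_del() no longer calling nouveau_channel_idle() itself before teardown. As a rough, self-contained sketch of that guard-then-wait shape, the standalone C program below uses stub types and helpers; fake_channel, fake_fence_new, fake_fence_wait and fake_fence_unref are hypothetical stand-ins, not nouveau or nvif APIs.

/* Hypothetical, self-contained illustration of the guard-then-wait shape
 * used by the reworked nouveau_channel_idle().  None of these types or
 * helpers are real nouveau/nvif interfaces. */
#include <stdio.h>
#include <stdlib.h>

struct fake_fence { int signalled; };

struct fake_channel {
	int chid;
	int has_fence_ctx;	/* stands in for chan->fence != NULL */
};

static int fake_fence_new(struct fake_channel *chan, struct fake_fence **pfence)
{
	(void)chan;
	*pfence = calloc(1, sizeof(**pfence));
	return *pfence ? 0 : -1;
}

static int fake_fence_wait(struct fake_fence *fence)
{
	fence->signalled = 1;	/* pretend the GPU caught up */
	return 0;
}

static void fake_fence_unref(struct fake_fence **pfence)
{
	free(*pfence);
	*pfence = NULL;
}

/* Mirrors the new nouveau_channel_idle(): bail out early, and report
 * success, when there is no channel or no fence context to idle. */
static int fake_channel_idle(struct fake_channel *chan)
{
	if (chan && chan->has_fence_ctx) {
		struct fake_fence *fence = NULL;
		int ret = fake_fence_new(chan, &fence);
		if (!ret) {
			ret = fake_fence_wait(fence);
			fake_fence_unref(&fence);
		}
		if (ret) {
			fprintf(stderr, "failed to idle channel %d\n",
				chan->chid);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct fake_channel chan = { .chid = 1, .has_fence_ctx = 1 };

	printf("idle(live channel) -> %d\n", fake_channel_idle(&chan));
	printf("idle(NULL channel) -> %d\n", fake_channel_idle(NULL));
	return 0;
}

In the real driver the same pattern sits behind nouveau_fence_new(), nouveau_fence_wait() and nouveau_fence_unref(), and the unconditional "return 0" is what lets callers idle a channel whose fence context was never created, now that nouveau_channel_del() skips the idle step entirely.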