diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index b42b8588f..c0543875e 100644
--- a/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -21,116 +21,239 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
+#include "priv.h"
+#include <core/memory.h>
 
 #include <core/ramht.h>
 #include <engine/gr/nv40.h>
 
+struct nv40_instmem {
+	struct nvkm_instmem base;
+	struct nvkm_mm heap;
+	void __iomem *iomem;
+};
+
 /******************************************************************************
- * instmem subdev implementation
+ * instmem object implementation
  *****************************************************************************/
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+
+struct nv40_instobj {
+	struct nvkm_memory memory;
+	struct nv40_instmem *imem;
+	struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->offset;
+}
+
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv40_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return iobj->imem->iomem + iobj->node->offset;
+}
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
+{
+}
 
 static u32
-nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	return ioread32_native(priv->iomem + addr);
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
 }
 
 static void
-nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+}
+
+static void *
+nv40_instobj_dtor(struct nvkm_memory *memory)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	iowrite32_native(data, priv->iomem + addr);
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	mutex_lock(&iobj->imem->base.subdev.mutex);
+	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	return iobj;
 }
 
+static const struct nvkm_memory_func
+nv40_instobj_func = {
+	.dtor = nv40_instobj_dtor,
+	.target = nv40_instobj_target,
+	.size = nv40_instobj_size,
+	.addr = nv40_instobj_addr,
+	.acquire = nv40_instobj_acquire,
+	.release = nv40_instobj_release,
+	.rd32 = nv40_instobj_rd32,
+	.wr32 = nv40_instobj_wr32,
+};
+
 static int
-nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_instmem_priv *priv;
-	int ret, bar, vs;
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nv40_instobj *iobj;
+	int ret;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	/* map bar */
-	if (nv_device_resource_len(device, 2))
-		bar = 2;
-	else
-		bar = 3;
+	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+	iobj->imem = imem;
 
-	priv->iomem = ioremap(nv_device_resource_start(device, bar),
-			      nv_device_resource_len(device, bar));
-	if (!priv->iomem) {
-		nv_error(priv, "unable to map PRAMIN BAR\n");
-		return -EFAULT;
-	}
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+			   align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.subdev.mutex);
+	return ret;
+}
+
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
+{
+	return ioread32_native(nv40_instmem(base)->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
+{
+	iowrite32_native(data, nv40_instmem(base)->iomem + addr);
+}
+
+static int
+nv40_instmem_oneinit(struct nvkm_instmem *base)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nvkm_device *device = imem->base.subdev.device;
+	int ret, vs;
 
 	/* PRAMIN aperture maps over the end of vram, reserve enough space
 	 * to fit graphics contexts for every channel, the magics come
 	 * from engine/gr/nv40.c
 	 */
-	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
-	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
-	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
-	else if (nv44_gr_class(priv))     priv->base.reserved = 0x4980 * vs;
-	else				  priv->base.reserved = 0x4a40 * vs;
-	priv->base.reserved += 16 * 1024;
-	priv->base.reserved *= 32;		/* per-channel */
-	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
-	priv->base.reserved += 512 * 1024;	/* object storage */
-
-	priv->base.reserved = round_up(priv->base.reserved, 4096);
-
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
+	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
+	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
+	else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
+	else				  imem->base.reserved = 0x4a40 * vs;
+	imem->base.reserved += 16 * 1024;
+	imem->base.reserved *= 32;		/* per-channel */
+	imem->base.reserved += 512 * 1024;	/* pci(e)gart table */
+	imem->base.reserved += 512 * 1024;	/* object storage */
+	imem->base.reserved = round_up(imem->base.reserved, 4096);
+
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+			      &imem->base.vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18200: reserve for RAMRO
 	 * 0x18200-0x20000: padding
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
+			      &imem->base.ramro);
 	if (ret)
 		return ret;
 
 	/* 0x20000-0x21000: reserve for RAMFC
 	 * 0x21000-0x40000: padding and some unknown crap
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-struct nvkm_oclass *
-nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_instmem_ctor,
-		.dtor = nv04_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = _nvkm_instmem_fini,
-		.rd32 = nv40_instmem_rd32,
-		.wr32 = nv40_instmem_wr32,
-	},
-	.instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv40_instmem_dtor(struct nvkm_instmem *base)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
+	nvkm_ramht_del(&imem->base.ramht);
+	nvkm_memory_del(&imem->base.vbios);
+	nvkm_mm_fini(&imem->heap);
+	if (imem->iomem)
+		iounmap(imem->iomem);
+	return imem;
+}
+
+static const struct nvkm_instmem_func
+nv40_instmem = {
+	.dtor = nv40_instmem_dtor,
+	.oneinit = nv40_instmem_oneinit,
+	.rd32 = nv40_instmem_rd32,
+	.wr32 = nv40_instmem_wr32,
+	.memory_new = nv40_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv40_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv40_instmem *imem;
+	int bar;
+
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+	*pimem = &imem->base;
+
+	/* map bar */
+	if (device->func->resource_size(device, 2))
+		bar = 2;
+	else
+		bar = 3;
+
+	imem->iomem = ioremap(device->func->resource_addr(device, bar),
+			      device->func->resource_size(device, bar));
+	if (!imem->iomem) {
+		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
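
A minimal usage sketch (editorial, not part of the patch above): after this rework an nv40 instance object is an ordinary struct nvkm_memory, so callers allocate through nvkm_memory_new() with NVKM_MEM_TARGET_INST, which routes to nv40_instobj_new() via the .memory_new hook, and bracket accesses with acquire/release instead of calling subdev rd32/wr32 methods. The example_instobj_use() function is hypothetical, and the nvkm_kmap()/nvkm_wo32()/nvkm_done() accessors are assumed to be the <core/memory.h> wrappers that dispatch through struct nvkm_memory_func.

/* Hypothetical sketch: allocate a 4KiB instance object in PRAMIN,
 * write one word through the new nvkm_memory interface, then free it.
 * nvkm_memory_new()/nvkm_memory_del() appear in the patch itself;
 * nvkm_kmap()/nvkm_wo32()/nvkm_done() are assumed <core/memory.h> helpers.
 */
static int
example_instobj_use(struct nvkm_device *device)
{
	struct nvkm_memory *memory;
	int ret;

	/* Routed to nv40_instobj_new() through nv40_instmem.memory_new. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x10,
			      false, &memory);
	if (ret)
		return ret;

	nvkm_kmap(memory);                    /* .acquire: nv40_instobj_acquire() */
	nvkm_wo32(memory, 0x000, 0x00000001); /* .wr32: nv40_instobj_wr32() */
	nvkm_done(memory);                    /* .release: nv40_instobj_release() */

	nvkm_memory_del(&memory);             /* .dtor frees the nvkm_mm heap node */
	return 0;
}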