/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <core/client.h>
27 #include <core/device.h>
28 #include <core/engctx.h>
29 #include <core/ramht.h>
30 #include <subdev/fb.h>
31 #include <subdev/instmem/nv04.h>
33 #include <nvif/class.h>
34 #include <nvif/unpack.h>
36 static struct ramfc_desc
38 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
39 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
40 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
41 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
42 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
43 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
44 { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
45 { 2, 28, 0x18, 28, 0x002058 },
46 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
47 { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
48 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
49 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
50 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
51 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
52 { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
53 { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
54 { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
55 { 32, 0, 0x40, 0, 0x0032e4 },
56 { 32, 0, 0x44, 0, 0x0032e8 },
57 { 32, 0, 0x4c, 0, 0x002088 },
58 { 32, 0, 0x50, 0, 0x003300 },
59 { 32, 0, 0x54, 0, 0x00330c },
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
68 nv40_fifo_object_attach(struct nvkm_object *parent,
69 struct nvkm_object *object, u32 handle)
71 struct nv04_fifo_priv *priv = (void *)parent->engine;
72 struct nv04_fifo_chan *chan = (void *)parent;
73 u32 context, chid = chan->base.chid;
76 if (nv_iclass(object, NV_GPUOBJ_CLASS))
77 context = nv_gpuobj(object)->addr >> 4;
79 context = 0x00000004; /* just non-zero */
81 switch (nv_engidx(object->engine)) {
82 case NVDEV_ENGINE_DMAOBJ:
84 context |= 0x00000000;
87 context |= 0x00100000;
89 case NVDEV_ENGINE_MPEG:
90 context |= 0x00200000;
96 context |= chid << 23;
98 mutex_lock(&nv_subdev(priv)->mutex);
99 ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
100 mutex_unlock(&nv_subdev(priv)->mutex);
105 nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
107 struct nv04_fifo_priv *priv = (void *)parent->engine;
108 struct nv04_fifo_chan *chan = (void *)parent;
112 switch (nv_engidx(engctx->engine)) {
113 case NVDEV_ENGINE_SW:
115 case NVDEV_ENGINE_GR:
119 case NVDEV_ENGINE_MPEG:
127 spin_lock_irqsave(&priv->base.lock, flags);
128 nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
129 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
131 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
132 nv_wr32(priv, reg, nv_engctx(engctx)->addr);
133 nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
135 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
136 spin_unlock_irqrestore(&priv->base.lock, flags);
141 nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
142 struct nvkm_object *engctx)
144 struct nv04_fifo_priv *priv = (void *)parent->engine;
145 struct nv04_fifo_chan *chan = (void *)parent;
149 switch (nv_engidx(engctx->engine)) {
150 case NVDEV_ENGINE_SW:
152 case NVDEV_ENGINE_GR:
156 case NVDEV_ENGINE_MPEG:
164 spin_lock_irqsave(&priv->base.lock, flags);
165 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
167 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
168 nv_wr32(priv, reg, 0x00000000);
169 nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
171 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
172 spin_unlock_irqrestore(&priv->base.lock, flags);
177 nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
178 struct nvkm_oclass *oclass, void *data, u32 size,
179 struct nvkm_object **pobject)
182 struct nv03_channel_dma_v0 v0;
184 struct nv04_fifo_priv *priv = (void *)engine;
185 struct nv04_fifo_chan *chan;
188 nv_ioctl(parent, "create channel dma size %d\n", size);
189 if (nvif_unpack(args->v0, 0, 0, false)) {
190 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
191 "offset %016llx\n", args->v0.version,
192 args->v0.pushbuf, args->v0.offset);
196 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
197 0x1000, args->v0.pushbuf,
198 (1ULL << NVDEV_ENGINE_DMAOBJ) |
199 (1ULL << NVDEV_ENGINE_SW) |
200 (1ULL << NVDEV_ENGINE_GR) |
201 (1ULL << NVDEV_ENGINE_MPEG), &chan);
202 *pobject = nv_object(chan);
206 args->v0.chid = chan->base.chid;
208 nv_parent(chan)->context_attach = nv40_fifo_context_attach;
209 nv_parent(chan)->context_detach = nv40_fifo_context_detach;
210 nv_parent(chan)->object_attach = nv40_fifo_object_attach;
211 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
212 chan->ramfc = chan->base.chid * 128;
214 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
215 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
216 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
217 nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
218 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
219 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
221 NV_PFIFO_CACHE1_BIG_ENDIAN |
223 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
224 nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
228 static struct nvkm_ofuncs
230 .ctor = nv40_fifo_chan_ctor,
231 .dtor = nv04_fifo_chan_dtor,
232 .init = nv04_fifo_chan_init,
233 .fini = nv04_fifo_chan_fini,
234 .map = _nvkm_fifo_channel_map,
235 .rd32 = _nvkm_fifo_channel_rd32,
236 .wr32 = _nvkm_fifo_channel_wr32,
237 .ntfy = _nvkm_fifo_channel_ntfy
240 static struct nvkm_oclass
241 nv40_fifo_sclass[] = {
242 { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/
250 static struct nvkm_oclass
252 .handle = NV_ENGCTX(FIFO, 0x40),
253 .ofuncs = &(struct nvkm_ofuncs) {
254 .ctor = nv04_fifo_context_ctor,
255 .dtor = _nvkm_fifo_context_dtor,
256 .init = _nvkm_fifo_context_init,
257 .fini = _nvkm_fifo_context_fini,
258 .rd32 = _nvkm_fifo_context_rd32,
259 .wr32 = _nvkm_fifo_context_wr32,
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
268 nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
269 struct nvkm_oclass *oclass, void *data, u32 size,
270 struct nvkm_object **pobject)
272 struct nv04_instmem_priv *imem = nv04_instmem(parent);
273 struct nv04_fifo_priv *priv;
276 ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
277 *pobject = nv_object(priv);
281 nvkm_ramht_ref(imem->ramht, &priv->ramht);
282 nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
283 nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
285 nv_subdev(priv)->unit = 0x00000100;
286 nv_subdev(priv)->intr = nv04_fifo_intr;
287 nv_engine(priv)->cclass = &nv40_fifo_cclass;
288 nv_engine(priv)->sclass = nv40_fifo_sclass;
289 priv->base.pause = nv04_fifo_pause;
290 priv->base.start = nv04_fifo_start;
291 priv->ramfc_desc = nv40_ramfc;
296 nv40_fifo_init(struct nvkm_object *object)
298 struct nv04_fifo_priv *priv = (void *)object;
299 struct nvkm_fb *pfb = nvkm_fb(object);
302 ret = nvkm_fifo_init(&priv->base);
306 nv_wr32(priv, 0x002040, 0x000000ff);
307 nv_wr32(priv, 0x002044, 0x2101ffff);
308 nv_wr32(priv, 0x002058, 0x00000001);
310 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
311 ((priv->ramht->bits - 9) << 16) |
312 (priv->ramht->gpuobj.addr >> 8));
313 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
315 switch (nv_device(priv)->chipset) {
319 nv_wr32(priv, 0x002230, 0x00000001);
326 nv_wr32(priv, 0x002220, 0x00030002);
329 nv_wr32(priv, 0x002230, 0x00000000);
330 nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 +
331 priv->ramfc->addr) >> 16) |
336 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
338 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
339 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
341 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
342 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
343 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
348 nv40_fifo_oclass = &(struct nvkm_oclass) {
349 .handle = NV_ENGINE(FIFO, 0x40),
350 .ofuncs = &(struct nvkm_ofuncs) {
351 .ctor = nv40_fifo_ctor,
352 .dtor = nv04_fifo_dtor,
353 .init = nv40_fifo_init,
354 .fini = _nvkm_fifo_fini,