These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/drivers/gpu/drm/nouveau/nouveau_chan.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <nvif/os.h>
#include <nvif/class.h>
#include <nvif/ioctl.h>

/*XXX*/
#include <core/client.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
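/* Usage sketch (assuming nouveau is loaded as a module):
 *   modprobe nouveau vram_pushbuf=1
 * places push buffers in VRAM rather than system memory; mode 0400
 * exposes the value read-only (to root) under
 * /sys/module/nouveau/parameters/vram_pushbuf.
 */

/* Wait for all work previously submitted to a channel to complete, by
 * emitting a fresh fence and blocking until it signals.
 */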
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
        if (likely(chan && chan->fence)) {
                struct nouveau_cli *cli = (void *)chan->user.client;
                struct nouveau_fence *fence = NULL;
                int ret;

                ret = nouveau_fence_new(chan, false, &fence);
                if (!ret) {
                        ret = nouveau_fence_wait(fence, false, false);
                        nouveau_fence_unref(&fence);
                }

                if (ret) {
                        NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
                                  chan->chid, nvxx_client(&cli->base)->name);
                        return ret;
                }
        }
        return 0;
}

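/* Tear down a channel: destroy its fence context, release the nvif
 * objects in reverse order of creation, then unmap, unpin and drop the
 * push buffer.
 */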
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan = *pchan;
        if (chan) {
                if (chan->fence)
                        nouveau_fence(chan->drm)->context_del(chan);
                nvif_object_fini(&chan->nvsw);
                nvif_object_fini(&chan->gart);
                nvif_object_fini(&chan->vram);
                nvif_object_fini(&chan->user);
                nvif_object_fini(&chan->push.ctxdma);
                nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
                nouveau_bo_unmap(chan->push.buffer);
                if (chan->push.buffer && chan->push.buffer->pin_refcnt)
                        nouveau_bo_unpin(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
        }
        *pchan = NULL;
}

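/* Common first stage of channel creation: allocate the channel struct
 * and its DMA push buffer, then build a ctxdma covering the entire
 * memory space the buffer lives in (VM, VRAM, AGP or system memory,
 * depending on chipset and configuration).
 */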
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
                     u32 size, struct nouveau_channel **pchan)
{
        struct nouveau_cli *cli = (void *)device->object.client;
        struct nvkm_mmu *mmu = nvxx_mmu(device);
        struct nv_dma_v0 args = {};
        struct nouveau_channel *chan;
        u32 target;
        int ret;

        chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;

        chan->device = device;
        chan->drm = drm;

        /* allocate memory for dma push buffer */
        target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
        if (nouveau_vram_pushbuf)
                target = TTM_PL_FLAG_VRAM;

        ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
                             &chan->push.buffer);
        if (ret == 0) {
                ret = nouveau_bo_pin(chan->push.buffer, target, false);
                if (ret == 0)
                        ret = nouveau_bo_map(chan->push.buffer);
        }

        if (ret) {
                nouveau_channel_del(pchan);
                return ret;
        }

        /* create dma object covering the *entire* memory space that the
         * pushbuf lives in, this is because the GEM code requires that
         * we be able to call out to other (indirect) push buffers
         */
        chan->push.vma.offset = chan->push.buffer->bo.offset;

        if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
                                         &chan->push.vma);
                if (ret) {
                        nouveau_channel_del(pchan);
                        return ret;
                }

                args.target = NV_DMA_V0_TARGET_VM;
                args.access = NV_DMA_V0_ACCESS_VM;
                args.start = 0;
                args.limit = cli->vm->mmu->limit - 1;
        } else
        if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
                if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
                        /* nv04 vram pushbuf hack, retarget to its location in
                         * the framebuffer bar rather than direct vram access..
                         * nfi why this exists, it came from the -nv ddx.
                         */
                        args.target = NV_DMA_V0_TARGET_PCI;
                        args.access = NV_DMA_V0_ACCESS_RDWR;
                        args.start = nvxx_device(device)->func->
                                resource_addr(nvxx_device(device), 1);
                        args.limit = args.start + device->info.ram_user - 1;
                } else {
                        args.target = NV_DMA_V0_TARGET_VRAM;
                        args.access = NV_DMA_V0_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = device->info.ram_user - 1;
                }
        } else {
                if (chan->drm->agp.bridge) {
                        args.target = NV_DMA_V0_TARGET_AGP;
                        args.access = NV_DMA_V0_ACCESS_RDWR;
                        args.start = chan->drm->agp.base;
                        args.limit = chan->drm->agp.base +
                                     chan->drm->agp.size - 1;
                } else {
                        args.target = NV_DMA_V0_TARGET_VM;
                        args.access = NV_DMA_V0_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = mmu->limit - 1;
                }
        }

        ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
                               &args, sizeof(args), &chan->push.ctxdma);
        if (ret) {
                nouveau_channel_del(pchan);
                return ret;
        }

        return 0;
}

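/* Try to create a GPFIFO (indirect) channel, attempting the newest
 * supported class first and falling back through older ones until one
 * succeeds or the list is exhausted.
 */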
static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                    u32 engine, struct nouveau_channel **pchan)
{
        static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
                                        KEPLER_CHANNEL_GPFIFO_A,
                                        FERMI_CHANNEL_GPFIFO,
                                        G82_CHANNEL_GPFIFO,
                                        NV50_CHANNEL_GPFIFO,
                                        0 };
        const u16 *oclass = oclasses;
        union {
                struct nv50_channel_gpfifo_v0 nv50;
                struct fermi_channel_gpfifo_v0 fermi;
                struct kepler_channel_gpfifo_a_v0 kepler;
        } args;
        struct nouveau_channel *chan;
        u32 size;
        int ret;

        /* allocate dma push buffer */
        ret = nouveau_channel_prep(drm, device, 0x12000, &chan);
        *pchan = chan;
        if (ret)
                return ret;

        /* create channel object */
        do {
                if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
                        args.kepler.version = 0;
                        args.kepler.engine  = engine;
                        args.kepler.ilength = 0x02000;
                        args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
                        args.kepler.vm = 0;
                        size = sizeof(args.kepler);
                } else
                if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
                        args.fermi.version = 0;
                        args.fermi.ilength = 0x02000;
                        args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
                        args.fermi.vm = 0;
                        size = sizeof(args.fermi);
                } else {
                        args.nv50.version = 0;
                        args.nv50.ilength = 0x02000;
                        args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
                        args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
                        args.nv50.vm = 0;
                        size = sizeof(args.nv50);
                }

                ret = nvif_object_init(&device->object, 0, *oclass++,
                                       &args, size, &chan->user);
                if (ret == 0) {
                        if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
                                chan->chid = args.kepler.chid;
                        else
                        if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
                                chan->chid = args.fermi.chid;
                        else
                                chan->chid = args.nv50.chid;
                        return ret;
                }
        } while (*oclass);

        nouveau_channel_del(pchan);
        return ret;
}

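/* Try to create a DMA (direct) channel for pre-NV50 hardware, again
 * walking the class list from newest to oldest.
 */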
static int
nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
                    struct nouveau_channel **pchan)
{
        static const u16 oclasses[] = { NV40_CHANNEL_DMA,
                                        NV17_CHANNEL_DMA,
                                        NV10_CHANNEL_DMA,
                                        NV03_CHANNEL_DMA,
                                        0 };
        const u16 *oclass = oclasses;
        struct nv03_channel_dma_v0 args;
        struct nouveau_channel *chan;
        int ret;

        /* allocate dma push buffer */
        ret = nouveau_channel_prep(drm, device, 0x10000, &chan);
        *pchan = chan;
        if (ret)
                return ret;

        /* create channel object */
        args.version = 0;
        args.pushbuf = nvif_handle(&chan->push.ctxdma);
        args.offset = chan->push.vma.offset;

        do {
                ret = nvif_object_init(&device->object, 0, *oclass++,
                                       &args, sizeof(args), &chan->user);
                if (ret == 0) {
                        chan->chid = args.chid;
                        return ret;
                }
        } while (ret && *oclass);

        nouveau_channel_del(pchan);
        return ret;
}

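/* Final stage of channel creation: map the channel's user registers,
 * create the vram/gart ctxdmas where the hardware needs them (pre-Fermi
 * only), set up the DMA tracking state, bind the nv04 software object
 * where fences require it, and create the fence context.
 */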
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
        struct nvif_device *device = chan->device;
        struct nouveau_cli *cli = (void *)chan->user.client;
        struct nvkm_mmu *mmu = nvxx_mmu(device);
        struct nv_dma_v0 args = {};
        int ret, i;

        nvif_object_map(&chan->user);

        /* allocate dma objects to cover all allowed vram, and gart */
        if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
                if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
                        args.target = NV_DMA_V0_TARGET_VM;
                        args.access = NV_DMA_V0_ACCESS_VM;
                        args.start = 0;
                        args.limit = cli->vm->mmu->limit - 1;
                } else {
                        args.target = NV_DMA_V0_TARGET_VRAM;
                        args.access = NV_DMA_V0_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = device->info.ram_user - 1;
                }

                ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
                                       &args, sizeof(args), &chan->vram);
                if (ret)
                        return ret;

                if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
                        args.target = NV_DMA_V0_TARGET_VM;
                        args.access = NV_DMA_V0_ACCESS_VM;
                        args.start = 0;
                        args.limit = cli->vm->mmu->limit - 1;
                } else
                if (chan->drm->agp.bridge) {
                        args.target = NV_DMA_V0_TARGET_AGP;
                        args.access = NV_DMA_V0_ACCESS_RDWR;
                        args.start = chan->drm->agp.base;
                        args.limit = chan->drm->agp.base +
                                     chan->drm->agp.size - 1;
                } else {
                        args.target = NV_DMA_V0_TARGET_VM;
                        args.access = NV_DMA_V0_ACCESS_RDWR;
                        args.start = 0;
                        args.limit = mmu->limit - 1;
                }

                ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
                                       &args, sizeof(args), &chan->gart);
                if (ret)
                        return ret;
        }

        /* initialise dma tracking parameters */
        switch (chan->user.oclass & 0x00ff) {
        case 0x006b:
        case 0x006e:
                chan->user_put = 0x40;
                chan->user_get = 0x44;
                chan->dma.max = (0x10000 / 4) - 2;
                break;
        default:
                chan->user_put = 0x40;
                chan->user_get = 0x44;
                chan->user_get_hi = 0x60;
                chan->dma.ib_base =  0x10000 / 4;
                chan->dma.ib_max  = (0x02000 / 8) - 1;
                chan->dma.ib_put  = 0;
                chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
                chan->dma.max = chan->dma.ib_base;
                break;
        }

        chan->dma.put = 0;
        chan->dma.cur = chan->dma.put;
        chan->dma.free = chan->dma.max - chan->dma.cur;

        ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
        if (ret)
                return ret;

        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
                OUT_RING(chan, 0x00000000);

        /* allocate software object class (used for fences on <= nv05) */
        if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
                ret = nvif_object_init(&chan->user, 0x006e,
                                       NVIF_IOCTL_NEW_V0_SW_NV04,
                                       NULL, 0, &chan->nvsw);
                if (ret)
                        return ret;

                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
                OUT_RING  (chan, chan->nvsw.handle);
                FIRE_RING (chan);
        }

        /* initialise synchronisation */
        return nouveau_fence(chan->drm)->context_new(chan);
}

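/* Public entry point: prefer a GPFIFO channel, fall back to a DMA
 * channel, then initialise whichever was created.  arg0 doubles as the
 * engine selector for Kepler+ GPFIFO channels and as the vram ctxdma
 * handle for nouveau_channel_init(); arg1 is the gart handle.
 */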
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
                    u32 arg0, u32 arg1, struct nouveau_channel **pchan)
{
        struct nouveau_cli *cli = (void *)device->object.client;
        bool super;
        int ret;

        /* hack until fencenv50 is fixed, and agp access relaxed */
        super = cli->base.super;
        cli->base.super = true;

        ret = nouveau_channel_ind(drm, device, arg0, pchan);
        if (ret) {
                NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
                ret = nouveau_channel_dma(drm, device, pchan);
                if (ret) {
                        NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
                        goto done;
                }
        }

        ret = nouveau_channel_init(*pchan, arg0, arg1);
        if (ret) {
                NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
                nouveau_channel_del(pchan);
        }

done:
        cli->base.super = super;
        return ret;
}
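
/* Call-site sketch (for illustration only; the real call lives in
 * nouveau_drm.c, not this file): the general-purpose kernel channel is
 * created roughly as follows on pre-Kepler hardware, where NvDmaFB and
 * NvDmaTT name the vram/gart ctxdma handles:
 *
 *      struct nouveau_channel *chan = NULL;
 *      int ret = nouveau_channel_new(drm, &drm->device, NvDmaFB, NvDmaTT,
 *                                    &chan);
 *      if (ret == 0) {
 *              ... submit work, nouveau_channel_idle(chan) to drain ...
 *              nouveau_channel_del(&chan);
 *      }
 */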