/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <subdev/mmu.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>

#include <core/gpuobj.h>

struct gf100_mmu_priv {
        struct nvkm_mmu base;
};

/* Map from compressed to corresponding uncompressed storage type.
 * The value 0xff represents an invalid storage type.
 */
const u8 gf100_pte_storage_type_map[256] =
{
        0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
        0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
        0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
        0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
        0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
        0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
        0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
        0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
        0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
        0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
        0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
        0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
        0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
        0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
        0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
        0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
        0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};

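/* Write the page directory entry at "index".  pgt[0] is the small-page
 * page table and pgt[1] the large-page one; the hardware PDE appears to
 * keep the large-page pointer in the low word and the small-page pointer
 * in the high word, hence the swapped assignment below.  Addresses are
 * stored in 256-byte units, with bit 0 marking the pointer valid.
 */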
static void
gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
{
        u32 pde[2] = { 0, 0 };

        if (pgt[0])
                pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
        if (pgt[1])
                pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

        nv_wo32(pgd, (index * 8) + 0, pde[0]);
        nv_wo32(pgd, (index * 8) + 4, pde[1]);
}

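/* Build the common part of a PTE.  The physical address is stored in
 * 256-byte units; bit 0 marks the entry present, and bit 1 is set for
 * NV_MEM_ACCESS_SYS mappings.  The target aperture and storage type
 * occupy bits 32+ and 36+ of the 64-bit entry.
 */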
static inline u64
gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
{
        phys >>= 8;

        phys |= 0x00000001; /* present */
        if (vma->access & NV_MEM_ACCESS_SYS)
                phys |= 0x00000002;

        phys |= ((u64)target  << 32);
        phys |= ((u64)memtype << 36);
        return phys;
}

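/* Map "cnt" pages of contiguous VRAM starting at "phys".  "next" is the
 * page size expressed in PTE address units (gf100_vm_addr drops the low
 * 8 address bits).  For compressed storage types, a comptag line is
 * assigned per 128KiB large page (encoded from bit 44) and the tags are
 * cleared via LTC before first use.
 */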
static void
gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
             struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
        u64 next = 1 << (vma->node->type - 8);

        phys  = gf100_vm_addr(vma, phys, mem->memtype, 0);
        pte <<= 3;

        if (mem->tag) {
                struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu);
                u32 tag = mem->tag->offset + (delta >> 17);
                phys |= (u64)tag << (32 + 12);
                next |= (u64)1   << (32 + 12);
                ltc->tags_clear(ltc, tag, cnt);
        }

        while (cnt--) {
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                phys += next;
                pte  += 8;
        }
}

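/* Map a list of discontiguous system-memory pages.  Target 5 appears to
 * select snooped (coherent) system memory and 7 the non-snooped variant;
 * the storage type is demoted to its uncompressed equivalent first, as
 * compression is only usable for VRAM.
 */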
static void
gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
                struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
        u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
        /* compressed storage types are invalid for system memory */
        u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];

        pte <<= 3;
        while (cnt--) {
                u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                pte += 8;
        }
}

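/* Invalidate "cnt" PTEs starting at "pte" by zeroing them; a cleared
 * entry has the present bit unset.
 */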
static void
gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
        pte <<= 3;
        while (cnt--) {
                nv_wo32(pgt, pte + 0, 0x00000000);
                nv_wo32(pgt, pte + 4, 0x00000000);
                pte += 8;
        }
}

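/* Flush the hardware TLBs for every page directory attached to this VM.
 * The 0x100cb8/0x100cbc flush registers seem to be shared across the
 * chip, hence the subdev mutex around the sequence below.
 */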
static void
gf100_vm_flush(struct nvkm_vm *vm)
{
        struct gf100_mmu_priv *priv = (void *)vm->mmu;
        struct nvkm_bar *bar = nvkm_bar(priv);
        struct nvkm_vm_pgd *vpgd;
        u32 type;

        bar->flush(bar);

        type = 0x00000001; /* PAGE_ALL */
        if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */

        mutex_lock(&nv_subdev(priv)->mutex);
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                /* 0x100c80 looks like it may expose a "free flush slots"
                 * counter: the faster you write to 0x100cbc, the more it
                 * decreases.
                 */
                if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
                        nv_error(priv, "vm timeout 0: 0x%08x %d\n",
                                 nv_rd32(priv, 0x100c80), type);
                }

                nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
                nv_wr32(priv, 0x100cbc, 0x80000000 | type);

                /* wait for flush to be queued? */
                if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
                        nv_error(priv, "vm timeout 1: 0x%08x %d\n",
                                 nv_rd32(priv, 0x100c80), type);
                }
        }
        mutex_unlock(&nv_subdev(priv)->mutex);
}

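/* The trailing 4096 is the block size handed to the address-space
 * allocator backing the new VM.
 */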
static int
gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
                struct nvkm_vm **pvm)
{
        return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
}

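/* Describe the GF100 VM layout: a 40-bit virtual address space with
 * 4KiB small pages and 128KiB large pages, where each page directory
 * entry covers 2^27 bytes (128MiB) of address space.
 */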
static int
gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
               struct nvkm_oclass *oclass, void *data, u32 size,
               struct nvkm_object **pobject)
{
        struct gf100_mmu_priv *priv;
        int ret;

        ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        priv->base.limit = 1ULL << 40;
        priv->base.dma_bits = 40;
        priv->base.pgt_bits  = 27 - 12;
        priv->base.spg_shift = 12;
        priv->base.lpg_shift = 17;
        priv->base.create = gf100_vm_create;
        priv->base.map_pgt = gf100_vm_map_pgt;
        priv->base.map = gf100_vm_map;
        priv->base.map_sg = gf100_vm_map_sg;
        priv->base.unmap = gf100_vm_unmap;
        priv->base.flush = gf100_vm_flush;
        return 0;
}

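/* Class definition through which the core instantiates this MMU
 * implementation (subdev handle 0xc0 = GF100).
 */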
struct nvkm_oclass
gf100_mmu_oclass = {
        .handle = NV_SUBDEV(MMU, 0xc0),
        .ofuncs = &(struct nvkm_ofuncs) {
                .ctor = gf100_mmu_ctor,
                .dtor = _nvkm_mmu_dtor,
                .init = _nvkm_mmu_init,
                .fini = _nvkm_mmu_fini,
        },
};