/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <subdev/mmu.h>
#include <subdev/fb.h>

#include <core/gpuobj.h>
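
/* Write PTEs for the VRAM regions backing 'node' into the page tables
 * covering 'vma', starting 'delta' bytes into the mapping.  Each region is
 * split at page-table boundaries: 'pte' indexes the current table and 'pde'
 * advances to the next table whenever 'max' entries have been filled.
 */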
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_mm_node *r;
        int big = vma->node->type != mmu->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->pgt_bits - bits);
        u32 end, len;

        delta = 0;
        list_for_each_entry(r, &node->regions, rl_entry) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;

                while (num) {
                        struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        mmu->map(vma, pgt, node, pte, len, phys, delta);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                phys += len << (bits + 12);
                                pde++;
                                pte = 0;
                        }

                        delta += (u64)len << vma->node->type;
                }
        }

        mmu->flush(vm);
}
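
/* Map a scatter-gather table one DMA segment at a time.  A segment may span
 * a page-table boundary, so PTEs are written individually and the second
 * inner loop picks up the remainder of a segment after stepping to the next
 * PDE.
 */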
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
                     struct nvkm_mem *mem)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->pgt_bits - bits);
        unsigned m, sglen;
        u32 end, len;
        int i;
        struct scatterlist *sg;

        for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
                struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
                sglen = sg_dma_len(sg) >> PAGE_SHIFT;

                end = pte + sglen;
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                        mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
                        num--;
                        pte++;
                        if (num == 0)
                                goto finish;
                }
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
                if (m < sglen) {
                        for (; m < sglen; m++) {
                                dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                                mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
                                num--;
                                pte++;
                                if (num == 0)
                                        goto finish;
                        }
                }
        }
finish:
        mmu->flush(vm);
}
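
/* Map a flat page list (mem->pages) of DMA addresses.  Runs of PTEs within
 * a single page table are written with one mmu->map_sg() call; the list
 * pointer and PTE index advance together until 'num' pages are mapped.
 */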
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
               struct nvkm_mem *mem)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        dma_addr_t *list = mem->pages;
        int big = vma->node->type != mmu->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                mmu->map_sg(vma, pgt, mem, pte, len, list);

                num  -= len;
                pte  += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        mmu->flush(vm);
}
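
/* Map an entire nvkm_mem at offset zero of 'vma', choosing the backend that
 * matches how the memory is described: sg table, page list, or contiguous
 * VRAM regions.
 */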
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
        if (node->sg)
                nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
        else
        if (node->pages)
                nvkm_vm_map_sg(vma, 0, node->size << 12, node);
        else
                nvkm_vm_map_at(vma, 0, node);
}
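
/* Invalidate the PTEs covering 'length' bytes of 'vma' starting at 'delta',
 * walking page tables the same way the map paths do, then flush the TLB.
 */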
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                mmu->unmap(pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        mmu->flush(vm);
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
        nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
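
/* Drop a reference on every page table between 'fpde' and 'lpde'.  Tables
 * whose refcount reaches zero are cleared from all linked page directories
 * and released; the subdev mutex is dropped around the final gpuobj
 * unreference.
 */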
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd;
        struct nvkm_vm_pgt *vpgt;
        struct nvkm_gpuobj *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount[big])
                        continue;

                pgt = vpgt->obj[big];
                vpgt->obj[big] = NULL;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
                }

                mutex_unlock(&nv_subdev(mmu)->mutex);
                nvkm_gpuobj_ref(NULL, &pgt);
                mutex_lock(&nv_subdev(mmu)->mutex);
        }
}
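
/* Allocate the page table for PDE 'pde' at the size implied by 'type' and
 * point every linked page directory at it.  The subdev mutex is dropped
 * around the allocation, so the refcount is rechecked afterwards in case
 * another thread filled the PDE first.
 */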
static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nvkm_vm_pgd *vpgd;
        struct nvkm_gpuobj *pgt;
        int big = (type != mmu->spg_shift);
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (mmu->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        mutex_unlock(&nv_subdev(mmu)->mutex);
        ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
                              NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        mutex_lock(&nv_subdev(mmu)->mutex);
        if (unlikely(ret))
                return ret;

        /* someone beat us to filling the PDE while we didn't have the lock */
        if (unlikely(vpgt->refcount[big]++)) {
                mutex_unlock(&nv_subdev(mmu)->mutex);
                nvkm_gpuobj_ref(NULL, &pgt);
                mutex_lock(&nv_subdev(mmu)->mutex);
                return 0;
        }

        vpgt->obj[big] = pgt;
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
        }

        return 0;
}
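
/* Allocate 'size' bytes of GPU virtual address space from the VM using
 * pages of 'page_shift' bits.  Every page table spanned by the new node
 * gains a reference, allocating tables on demand; on failure the references
 * taken so far and the address-space node are released again.
 */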
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
            struct nvkm_vma *vma)
{
        struct nvkm_mmu *mmu = vm->mmu;
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&nv_subdev(mmu)->mutex);
        ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
                           &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&nv_subdev(mmu)->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> mmu->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;

        for (pde = fpde; pde <= lpde; pde++) {
                struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
                int big = (vma->node->type != mmu->spg_shift);

                if (likely(vpgt->refcount[big])) {
                        vpgt->refcount[big]++;
                        continue;
                }

                ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
                        nvkm_mm_free(&vm->mm, &vma->node);
                        mutex_unlock(&nv_subdev(mmu)->mutex);
                        return ret;
                }
        }
        mutex_unlock(&nv_subdev(mmu)->mutex);

        vma->vm = NULL;
        nvkm_vm_ref(vm, &vma->vm, NULL);
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}
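
/* Return a previously allocated range to the VM: drop the page-table
 * references it held, free the address-space node, and release the VMA's
 * reference on the VM itself.
 */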
void
nvkm_vm_put(struct nvkm_vma *vma)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        fpde = (vma->node->offset >> mmu->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;

        mutex_lock(&nv_subdev(mmu)->mutex);
        nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
        nvkm_mm_free(&vm->mm, &vma->node);
        mutex_unlock(&nv_subdev(mmu)->mutex);

        nvkm_vm_ref(NULL, &vma->vm, NULL);
}
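
/* Create a VM covering 'length' bytes of address space at 'offset'.  The
 * allocator for client mappings begins at 'mm_offset' and works in 'block'
 * sized granules; page-table bookkeeping is sized from the MMU's pgt_bits.
 */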
int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
               u32 block, struct nvkm_vm **pvm)
{
        struct nvkm_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        INIT_LIST_HEAD(&vm->pgd_list);
        vm->mmu = mmu;
        kref_init(&vm->refcount);
        vm->fpde = offset >> (mmu->pgt_bits + 12);
        vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12);

        vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
        if (!vm->pgt) {
                kfree(vm);
                return -ENOMEM;
        }

        ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                           block >> 12);
        if (ret) {
                vfree(vm->pgt);
                kfree(vm);
                return ret;
        }

        *pvm = vm;
        return 0;
}

int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
            struct nvkm_vm **pvm)
{
        struct nvkm_mmu *mmu = nvkm_mmu(device);
        return mmu->create(mmu, offset, length, mm_offset, pvm);
}
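
/* Attach a page directory to the VM: take a reference on the pgd object,
 * write every currently allocated page table into it, and add it to the
 * VM's pgd list so future table allocations update it as well.
 */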
static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        nvkm_gpuobj_ref(pgd, &vpgd->obj);

        mutex_lock(&nv_subdev(mmu)->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
                mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&nv_subdev(mmu)->mutex);
        return 0;
}

static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd, *tmp;
        struct nvkm_gpuobj *pgd = NULL;

        if (!mpgd)
                return;

        mutex_lock(&nv_subdev(mmu)->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj == mpgd) {
                        pgd = vpgd->obj;
                        list_del(&vpgd->head);
                        kfree(vpgd);
                        break;
                }
        }
        mutex_unlock(&nv_subdev(mmu)->mutex);

        nvkm_gpuobj_ref(NULL, &pgd);
}

static void
nvkm_vm_del(struct kref *kref)
{
        struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
        struct nvkm_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nvkm_vm_unlink(vm, vpgd->obj);
        }

        nvkm_mm_fini(&vm->mm);
        vfree(vm->pgt);
        kfree(vm);
}
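
/* Move a VM reference from '*ptr' to 'ref', optionally linking or unlinking
 * the given page directory in the process.  Dropping the last reference
 * tears the VM down via nvkm_vm_del().
 */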
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
        if (ref) {
                int ret = nvkm_vm_link(ref, pgd);
                if (ret)
                        return ret;

                kref_get(&ref->refcount);
        }

        if (*ptr) {
                nvkm_vm_unlink(*ptr, pgd);
                kref_put(&(*ptr)->refcount, nvkm_vm_del);
        }

        *ptr = ref;
        return 0;
}