/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "mmu.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock,
 * and no other functions that may be using the locks for other purposes may
 * be called from within the locked region.
 * Since the slots are per processor, this guarantees that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function
 * that may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */

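/*
 * Helpers to split a GPU virtual offset into its page-directory index
 * (the bits above PSB_PDE_SHIFT) and its page-table index (the 10 bits
 * above PSB_PTE_SHIFT).
 */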
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
        return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
        return offset >> PSB_PDE_SHIFT;
}

#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
        __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
        if (!driver->has_clflush)
                return;

        mb();
        psb_clflush(addr);
        mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
}

#endif

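/*
 * Invalidate the SGX directory cache if a TLB flush is pending (or when
 * forced). Called with driver->sem held in write mode.
 */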
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        if (atomic_read(&driver->needs_tlbflush) || force) {
                uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

                /* Make sure data cache is turned off before enabling it */
                wmb();
                PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
                (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
                if (driver->msvdx_mmu_invaldc)
                        atomic_set(driver->msvdx_mmu_invaldc, 1);
        }
        atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
        down_write(&driver->sem);
        psb_mmu_flush_pd_locked(driver, force);
        up_write(&driver->sem);
}
#endif

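/*
 * Flush (or, if a TLB flush is pending, invalidate) the SGX MMU caches
 * and mark the MSVDX MMU as needing invalidation as well.
 */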
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        uint32_t val;

        down_write(&driver->sem);
        val = PSB_RSGX32(PSB_CR_BIF_CTRL);
        if (atomic_read(&driver->needs_tlbflush))
                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
        else
                PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

        /* Make sure data cache is turned off and MMU is flushed before
           restoring bank interface control register */
        wmb();
        PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
                   PSB_CR_BIF_CTRL);
        (void)PSB_RSGX32(PSB_CR_BIF_CTRL);

        atomic_set(&driver->needs_tlbflush, 0);
        if (driver->msvdx_mmu_invaldc)
                atomic_set(driver->msvdx_mmu_invaldc, 1);
        up_write(&driver->sem);
}

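/*
 * Point one of the hardware directory-list base registers at this page
 * directory and flush the directory cache, binding the pd to hw_context.
 */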
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
        struct drm_device *dev = pd->driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
                          PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

        down_write(&pd->driver->sem);
        PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
        wmb();
        psb_mmu_flush_pd_locked(pd->driver, 1);
        pd->hw_context = hw_context;
        up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
                                            unsigned long end)
{
        addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
        return (addr < end) ? addr : end;
}

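/*
 * Build a PTE from a pfn and the PSB_MMU_*_MEMORY type flags
 * (cached / read-only / write-only), always marking it valid.
 */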
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
        uint32_t mask = PSB_PTE_VALID;

        if (type & PSB_MMU_CACHED_MEMORY)
                mask |= PSB_PTE_CACHED;
        if (type & PSB_MMU_RO_MEMORY)
                mask |= PSB_PTE_RO;
        if (type & PSB_MMU_WO_MEMORY)
                mask |= PSB_PTE_WO;

        return (pfn << PAGE_SHIFT) | mask;
}

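/*
 * Allocate a page directory along with the dummy page table and dummy page
 * used to back invalid entries. Unless trap_pagefaults is set, all PDEs and
 * PTEs initially point at the dummies so stray accesses hit the dummy page
 * instead of faulting.
 */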
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                    int trap_pagefaults, int invalid_type)
{
        struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        uint32_t *v;
        int i;

        if (!pd)
                return NULL;

        pd->p = alloc_page(GFP_DMA32);
        if (!pd->p)
                goto out_err1;
        pd->dummy_pt = alloc_page(GFP_DMA32);
        if (!pd->dummy_pt)
                goto out_err2;
        pd->dummy_page = alloc_page(GFP_DMA32);
        if (!pd->dummy_page)
                goto out_err3;

        if (!trap_pagefaults) {
                pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
                                                   invalid_type);
                pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
                                                   invalid_type);
        } else {
                pd->invalid_pde = 0;
                pd->invalid_pte = 0;
        }

        v = kmap(pd->dummy_pt);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pte;

        kunmap(pd->dummy_pt);

        v = kmap(pd->p);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pde;

        kunmap(pd->p);

        clear_page(kmap(pd->dummy_page));
        kunmap(pd->dummy_page);

        pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
        if (!pd->tables)
                goto out_err4;

        pd->hw_context = -1;
        pd->pd_mask = PSB_PTE_VALID;
        pd->driver = driver;

        return pd;

out_err4:
        __free_page(pd->dummy_page);
out_err3:
        __free_page(pd->dummy_pt);
out_err2:
        __free_page(pd->p);
out_err1:
        kfree(pd);
        return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
        __free_page(pt->p);
        kfree(pt);
}

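/*
 * Tear down a page directory: detach it from the hardware if it is bound,
 * then free all of its page tables and backing pages.
 */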
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
        struct psb_mmu_driver *driver = pd->driver;
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_mmu_pt *pt;
        int i;

        down_write(&driver->sem);
        if (pd->hw_context != -1) {
                PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
                psb_mmu_flush_pd_locked(driver, 1);
        }

        /* Should take the spinlock here, but we don't need to do that
           since we have the semaphore in write mode. */

        for (i = 0; i < 1024; ++i) {
                pt = pd->tables[i];
                if (pt)
                        psb_mmu_free_pt(pt);
        }

        vfree(pd->tables);
        __free_page(pd->dummy_page);
        __free_page(pd->dummy_pt);
        __free_page(pd->p);
        kfree(pd);
        up_write(&driver->sem);
}

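/*
 * Allocate a page table whose entries all point at the directory's invalid
 * (dummy) PTE, flushing the new entries from the CPU cache when the
 * directory is bound to a hardware context.
 */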
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
        struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
        void *v;
        uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
        uint32_t clflush_count = PAGE_SIZE / clflush_add;
        spinlock_t *lock = &pd->driver->lock;
        uint8_t *clf;
        uint32_t *ptes;
        int i;

        if (!pt)
                return NULL;

        pt->p = alloc_page(GFP_DMA32);
        if (!pt->p) {
                kfree(pt);
                return NULL;
        }

        spin_lock(lock);

        v = kmap_atomic(pt->p);
        clf = (uint8_t *) v;
        ptes = (uint32_t *) v;
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                *ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
        if (pd->driver->has_clflush && pd->hw_context != -1) {
                mb();
                for (i = 0; i < clflush_count; ++i) {
                        psb_clflush(clf);
                        clf += clflush_add;
                }
                mb();
        }
#endif
        kunmap_atomic(v);
        spin_unlock(lock);

        pt->count = 0;
        pt->pd = pd;
        pt->index = 0;

        return pt;
}

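/*
 * Return the page table covering addr, allocating it and hooking it into
 * the directory if it doesn't exist yet. The table is returned kmapped and
 * with the driver spinlock held; psb_mmu_pt_map_lock() below is the
 * non-allocating variant. Both pair with psb_mmu_pt_unmap_unlock().
 */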
struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                             unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        uint32_t *v;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        while (!pt) {
                spin_unlock(lock);
                pt = psb_mmu_alloc_pt(pd);
                if (!pt)
                        return NULL;
                spin_lock(lock);

                if (pd->tables[index]) {
                        spin_unlock(lock);
                        psb_mmu_free_pt(pt);
                        spin_lock(lock);
                        pt = pd->tables[index];
                        continue;
                }

                v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                /* Keep the directory page mapped until the new PDE has
                   been flushed from the CPU cache. */
                kunmap_atomic(v);
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
                                              unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        if (!pt) {
                spin_unlock(lock);
                return NULL;
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}

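/*
 * Unmap a page table mapped by the lock functions above and drop the
 * driver spinlock. If the table no longer has any valid entries, unhook
 * it from the directory and free it.
 */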
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;

        kunmap_atomic(pt->v);
        if (pt->count == 0) {
                v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);
                return;
        }
        spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
                                   uint32_t pte)
{
        pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
                                          unsigned long addr)
{
        pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        down_read(&driver->sem);
        pd = driver->default_pd;
        up_read(&driver->sem);

        return pd;
}

/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        pd = psb_mmu_get_default_pd(driver);
        return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
        psb_mmu_free_pagedir(driver->default_pd);
        kfree(driver);
}

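/*
 * Set up the MMU driver: allocate the default page directory, reset the
 * bus interface fault state and, on x86, detect clflush support and the
 * cache-line stride used when flushing PTEs.
 */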
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
                                           int trap_pagefaults,
                                           int invalid_type,
                                           atomic_t *msvdx_mmu_invaldc)
{
        struct psb_mmu_driver *driver;
        struct drm_psb_private *dev_priv = dev->dev_private;

        driver = kmalloc(sizeof(*driver), GFP_KERNEL);
        if (!driver)
                return NULL;

        driver->dev = dev;
        driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
                                              invalid_type);
        if (!driver->default_pd)
                goto out_err1;

        spin_lock_init(&driver->lock);
        init_rwsem(&driver->sem);
        down_write(&driver->sem);
        atomic_set(&driver->needs_tlbflush, 1);
        driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

        driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
        PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
                   PSB_CR_BIF_CTRL);
        PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
                   PSB_CR_BIF_CTRL);

        driver->has_clflush = 0;

#if defined(CONFIG_X86)
        if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
                uint32_t tfms, misc, cap0, cap4, clflush_size;

                /*
                 * clflush size is determined at kernel setup for x86_64 but not
                 * for i386. We have to do it here.
                 */

                cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
                clflush_size = ((misc >> 8) & 0xff) * 8;
                driver->has_clflush = 1;
                driver->clflush_add =
                    PAGE_SIZE * clflush_size / sizeof(uint32_t);
                driver->clflush_mask = driver->clflush_add - 1;
                driver->clflush_mask = ~driver->clflush_mask;
        }
#endif

        up_write(&driver->sem);
        return driver;

out_err1:
        kfree(driver);
        return NULL;
}

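/*
 * Flush the CPU cache lines backing a range of PTEs so the device sees
 * the updated entries, honouring an optional tile stride layout.
 */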
#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
                               uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long clflush_add = pd->driver->clflush_add;
        unsigned long clflush_mask = pd->driver->clflush_mask;

        if (!pd->driver->has_clflush)
                return;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;
        mb();
        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
                        } while (addr += clflush_add,
                                 (addr & clflush_mask) < next);

                        psb_mmu_pt_unmap_unlock(pt);
                } while (addr = next, next != end);
                address += row_add;
        }
        mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
                               uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        drm_ttm_cache_flush();
}
#endif

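/*
 * Unmap a linear run of num_pages pages starting at address, restoring
 * the invalid PTE for each entry and flushing caches/TLB if the
 * directory is bound to a hardware context.
 */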
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                 unsigned long address, uint32_t num_pages)
{
        struct psb_mmu_pt *pt;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        goto out;
                do {
                        psb_mmu_invalidate_pte(pt, addr);
                        --pt->count;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}

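/*
 * Tiled variant of the above: unmap num_pages pages laid out as rows of
 * desired_tile_stride pages separated by hw_tile_stride pages.
 */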
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
                          uint32_t num_pages, uint32_t desired_tile_stride,
                          uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        /* Make sure we only need to flush this processor's cache */

        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_mmu_invalidate_pte(pt, addr);
                                --pt->count;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);
                address += row_add;
        }
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}

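/*
 * Map a physically contiguous run of pages (starting at start_pfn) into a
 * linear range of GPU virtual addresses with the given memory type.
 */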
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
                                unsigned long address, uint32_t num_pages,
                                int type)
{
        struct psb_mmu_pt *pt;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt) {
                        ret = -ENOMEM;
                        goto out;
                }
                do {
                        pte = psb_mmu_mask_pte(start_pfn++, type);
                        psb_mmu_set_pte(pt, addr, pte);
                        pt->count++;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);
        ret = 0;

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        /* Propagate -ENOMEM from a failed page-table allocation. */
        return ret;
}

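/*
 * Map an array of (not necessarily contiguous) pages, optionally in a
 * tiled layout described by desired_tile_stride and hw_tile_stride.
 */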
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                         unsigned long address, uint32_t num_pages,
                         uint32_t desired_tile_stride, uint32_t hw_tile_stride,
                         int type)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        if (hw_tile_stride) {
                if (num_pages % desired_tile_stride != 0)
                        return -EINVAL;
                rows = num_pages / desired_tile_stride;
        } else {
                desired_tile_stride = num_pages;
        }

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                        if (!pt)
                                goto out;
                        do {
                                pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
                                                       type);
                                psb_mmu_set_pte(pt, addr, pte);
                                pt->count++;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);

                address += row_add;
        }

        ret = 0;
out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return ret;
}

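/*
 * Look up the pfn backing a GPU virtual address. Addresses backed only by
 * the dummy (invalid) entries yield the dummy page's pfn; unmapped
 * addresses return -EINVAL.
 */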
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                           unsigned long *pfn)
{
        int ret;
        struct psb_mmu_pt *pt;
        uint32_t tmp;
        spinlock_t *lock = &pd->driver->lock;

        down_read(&pd->driver->sem);
        pt = psb_mmu_pt_map_lock(pd, virtual);
        if (!pt) {
                uint32_t *v;

                spin_lock(lock);
                v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
                kunmap_atomic(v);
                spin_unlock(lock);

                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
                    !(pd->invalid_pte & PSB_PTE_VALID)) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = 0;
                *pfn = pd->invalid_pte >> PAGE_SHIFT;
                goto out;
        }
        tmp = pt->v[psb_mmu_pt_index(virtual)];
        if (!(tmp & PSB_PTE_VALID)) {
                ret = -EINVAL;
        } else {
                ret = 0;
                *pfn = tmp >> PAGE_SHIFT;
        }
        psb_mmu_pt_unmap_unlock(pt);
out:
        up_read(&pd->driver->sem);
        return ret;
}