/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"
#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)
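/*
 * Worked example (illustrative, assuming a 4KB granule): a 48-bit IAS needs
 * four levels, so the walk starts at level 0; a 39-bit IAS needs only three
 * levels, so ARM_LPAE_START_LVL() yields 1 and level 0 is skipped entirely.
 */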
/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
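/*
 * Example (4KB granule: pg_shift == 12, bits_per_level == 9, four levels):
 * ARM_LPAE_LVL_SHIFT() evaluates to 39, 30, 21 and 12 for levels 0-3, i.e.
 * each level consumes the next lower 9-bit slice of the IOVA above the
 * 12-bit page offset.
 */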
#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
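/*
 * Example (4KB granule, four levels): ARM_LPAE_LVL_IDX(iova, 3, d) is simply
 * (iova >> 12) & 0x1ff. At the start level the index field is widened by
 * ARM_LPAE_PGD_IDX() when the pgd spans multiple concatenated pages
 * (stage-2 only); otherwise ARM_LPAE_PGD_IDX() is 0.
 */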
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
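/*
 * Example (4KB granule, 8-byte PTEs): ARM_LPAE_BLOCK_SIZE() gives 4KB at
 * level 3, 2MB at level 2 and 1GB at level 1, matching the page and block
 * sizes advertised in pgsize_bitmap for that granule.
 */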
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;
static bool selftest_running = false;

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	*ptep = pte;
	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
	return 0;
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = alloc_pages_exact(1UL << data->pg_shift,
					  GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;

		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
						 cookie);
		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	free_pages_exact(start, table_size);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	void *cookie = data->iop.cookie;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	*ptep = table;
	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	iova &= ~(blk_size - 1);
	tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		*ptep = 0;
		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(data->iop.cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
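/*
 * Example: with a 4KB kernel PAGE_SIZE, an incoming pgsize_bitmap of
 * (SZ_4K | SZ_64K | SZ_2M) selects the 4KB granule and is trimmed to
 * (SZ_4K | SZ_2M); the 64KB size is dropped because it is not a valid
 * page or block size for a 4KB translation granule.
 */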
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
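	/*
	 * Example: ias == 32 with a 4KB granule gives va_bits == 20 and
	 * bits_per_level == 9, hence three levels; pgd_bits == 2, so the
	 * (non-concatenated) pgd is only four 8-byte entries (32 bytes).
	 */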
	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
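	/*
	 * Example: a 40-bit IAS with a 4KB granule would need four levels
	 * (va_bits == 28); the 16-byte level-0 table holds only two entries,
	 * so the two level-1 tables are concatenated into an 8KB pgd instead
	 * and the stage-2 walk starts at level 1.
	 */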
	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb	= &dummy_tlb_ops,
		.oas	= 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);

#endif /* CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST */