These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / arch / arm64 / mm / mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * The empty_zero_page is a special page that is used for zero-initialized
 * data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

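/*
 * Allocate zeroed page-table memory from memblock. Only safe early in
 * boot, before the buddy/slab allocators are up.
 */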
static void __init *early_alloc(unsigned long sz)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_alloc(sz, sz);
        BUG_ON(!phys);
        ptr = __va(phys);
        memset(ptr, 0, sz);
        return ptr;
}

/*
 * Remap a PMD into pages.
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * Need to have the least restrictive permissions available;
                 * permissions will be fixed up later.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}

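/*
 * Populate the PTE level for [addr, end), splitting an existing section
 * mapping into pages first if necessary.
 */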
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
                flush_tlb_all();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

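/*
 * Split a 1GB PUD block into PMD-level sections, preserving the original
 * attributes (recovered by XORing the output address back out of the PUD
 * value).
 */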
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}

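/*
 * Populate the PMD level for [addr, end), using section mappings where the
 * virtual and physical addresses allow it and falling back to PTEs
 * otherwise.
 */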
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
                pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
                if (pud_sect(*pud)) {
                        /*
                         * The existing 1G of mappings needs to remain
                         * present while it is being split.
                         */
                        split_pud(pud, pmd);
                }
                pud_populate(mm, pud, pmd);
                flush_tlb_all();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}

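/*
 * A 1GB (PUD-level) block mapping is only possible with the 4KB granule
 * (PAGE_SHIFT == 12), and only when the virtual range and the physical
 * address are all aligned to PUD_SIZE.
 */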
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                        unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * given virtual/physical address range.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                                    phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
                                    void *(*alloc)(unsigned long size))
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

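/*
 * Late counterpart of early_alloc(): once the page allocator is up,
 * page-table pages come from __get_free_page() instead of memblock.
 */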
static void *late_alloc(unsigned long size)
{
        void *ptr;

        BUG_ON(size > PAGE_SIZE);
        ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
        return ptr;
}

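/*
 * Boot-time mapping helper for the kernel's init_mm. Uses the memblock
 * allocator, so it must only be called before the page allocator is up.
 */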
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
                         size, prot, early_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
                                late_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
                                phys, virt, size, prot, late_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        /*
         * Set up the executable regions using the existing section mappings
         * for now. This will get more fine grained later once all memory
         * is mapped.
         */
        unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
        unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else if (start >= kernel_x_end) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else {
                if (start < kernel_x_start)
                        create_mapping(start, __phys_to_virt(start),
                                kernel_x_start - start,
                                PAGE_KERNEL);
                create_mapping(kernel_x_start,
                                __phys_to_virt(kernel_x_start),
                                kernel_x_end - kernel_x_start,
                                PAGE_KERNEL_EXEC);
                if (kernel_x_end < end)
                        create_mapping(kernel_x_end,
                                __phys_to_virt(kernel_x_end),
                                end - kernel_x_end,
                                PAGE_KERNEL);
        }
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        create_mapping(start, __phys_to_virt(start), end - start,
                        PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
         * of memory starting from PHYS_OFFSET (which must be aligned to 2MB
         * as per Documentation/arm64/booting.txt).
         */
        limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

                if (ARM64_SWAPPER_USES_SECTION_MAPS) {
                        /*
                         * For the first memory bank align the start address and
                         * current memblock limit to prevent create_mapping() from
                         * allocating pte page tables from unmapped memory. With
                         * the section maps, if the first block doesn't end on a
                         * section size boundary, create_mapping() will try to
                         * allocate a pte page, which may be returned from an
                         * unmapped area.
                         * When section maps are not used, the pte page table for
                         * the current limit is already present in swapper_pg_dir.
                         */
                        if (start < limit)
                                start = ALIGN(start, SECTION_SIZE);
                        if (end < limit) {
                                limit = end & SECTION_MASK;
                                memblock_set_current_limit(limit);
                        }
                }
                __map_memblock(start, end);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

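/*
 * The coarse SWAPPER_BLOCK_SIZE-aligned executable mapping set up by
 * __map_memblock() may overshoot _stext and __init_end; remap the unaligned
 * edges with non-executable permissions.
 */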
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
        /*
         * Now that we are actually fully mapped, make the start/end more
         * fine grained.
         */
        if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
                                                         SWAPPER_BLOCK_SIZE);

                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }

        if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
                                                          SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
        }
#endif
}

#ifdef CONFIG_DEBUG_RODATA
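/*
 * Remap the kernel text and rodata (_stext to _etext) read-only and
 * executable once boot no longer needs to write to it.
 */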
void mark_rodata_ro(void)
{
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
                                PAGE_KERNEL_ROX);
}
#endif

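/*
 * Remap the __init region with non-executable permissions once the kernel
 * is done with it and the memory is about to be reclaimed.
 */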
void fixup_init(void)
{
        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
                        (unsigned long)__init_end - (unsigned long)__init_begin,
                        PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        void *zero_page;

        map_mem();
        fixup_executable();

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);

        /* Ensure the zero page is visible to the page table walker */
        dsb(ishst);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
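/*
 * With section maps available, back the vmemmap with PMD-level blocks
 * rather than individual pages.
 */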
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

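/*
 * Walk the kernel page tables down to the fixmap's pud/pmd/pte. The entries
 * are expected to exist (installed by early_fixmap_init()), so a missing or
 * bad entry is a BUG.
 */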
static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

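/*
 * Install (or, with empty flags, clear) the fixmap entry for 'idx'. The TLB
 * only needs flushing when an existing entry is removed.
 */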
void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}

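/*
 * Map the flattened device tree through the FIX_FDT fixmap slot so it can
 * be parsed before the linear mapping is available. Returns the virtual
 * address of the FDT, or NULL if the blob is missing, misaligned, invalid
 * or too large.
 */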
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        pgprot_t prot = PAGE_KERNEL_RO;
        int size, offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the size field of the
         * FDT header after mapping the first chunk, double check here if that
         * is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                       SWAPPER_BLOCK_SIZE, prot);

        if (fdt_check_header(dt_virt) != 0)
                return NULL;

        size = fdt_totalsize(dt_virt);
        if (size > MAX_FDT_SIZE)
                return NULL;

        if (offset + size > SWAPPER_BLOCK_SIZE)
                create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

        memblock_reserve(dt_phys, size);

        return dt_virt;
}