These changes are the raw update to the linux-4.4.6-rt14 kernel sources in kvmfornfv.git.

diff --git a/kernel/arch/arm64/mm/mmu.c b/kernel/arch/arm64/mm/mmu.c
index 5b8b664..116ad65 100644
--- a/kernel/arch/arm64/mm/mmu.c
+++ b/kernel/arch/arm64/mm/mmu.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/libfdt.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
@@ -31,6 +32,7 @@
 
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -62,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 static void __init *early_alloc(unsigned long sz)
 {
-       void *ptr = __va(memblock_alloc(sz, sz));
-       BUG_ON(!ptr);
+       phys_addr_t phys;
+       void *ptr;
+
+       phys = memblock_alloc(sz, sz);
+       BUG_ON(!phys);
+       ptr = __va(phys);
        memset(ptr, 0, sz);
        return ptr;
 }
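
The early_alloc() change above checks the physical address returned by memblock_alloc() rather than the __va() result, presumably because a failed allocation would come back as physical address 0, and __va(0) still yields a non-NULL linear-map pointer, so the old BUG_ON(!ptr) could never trip. A minimal sketch of the same pattern (the PAGE_SIZE allocation is illustrative only):

    phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

    BUG_ON(!pa);                     /* catch failure while the address is still physical */
    memset(__va(pa), 0, PAGE_SIZE);  /* translate to the linear map only afterwards */
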
@@ -109,14 +115,14 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-void split_pud(pud_t *old_pud, pmd_t *pmd)
+static void split_pud(pud_t *old_pud, pmd_t *pmd)
 {
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;
 
        do {
-               set_pmd(pmd, __pmd(addr | prot));
+               set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
 }
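
The pgprot_val() change in split_pud() is, presumably, about type safety: with STRICT_MM_TYPECHECKS enabled, pgprot_t is a wrapper struct rather than a plain integer, so OR-ing it directly into a pmd value does not compile. A hedged sketch of the distinction, reusing only identifiers from the hunk above:

    pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);

    /* set_pmd(pmd, __pmd(addr | prot));  -- breaks once pgprot_t is a struct */
    set_pmd(pmd, __pmd(addr | pgprot_val(prot)));  /* unwrap to the raw bits first */
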
@@ -266,7 +272,7 @@ static void *late_alloc(unsigned long size)
        return ptr;
 }
 
-static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
 {
        if (virt < VMALLOC_START) {
@@ -307,8 +313,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
         * for now. This will get more fine grained later once all memory
         * is mapped
         */
-       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+       unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+       unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
@@ -352,14 +358,11 @@ static void __init map_mem(void)
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
-        * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
-        * PHYS_OFFSET (which must be aligned to 2MB as per
-        * Documentation/arm64/booting.txt).
+        * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps) of
+        * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
+        * per Documentation/arm64/booting.txt).
         */
-       if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
-               limit = PHYS_OFFSET + PMD_SIZE;
-       else
-               limit = PHYS_OFFSET + PUD_SIZE;
+       limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
        memblock_set_current_limit(limit);
 
        /* map all the memory banks */
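
The SWAPPER_INIT_MAP_SIZE limit above, and the SWAPPER_BLOCK_SIZE and ARM64_SWAPPER_USES_SECTION_MAPS uses in the hunks below, come from the newly included asm/kernel-pgtable.h. Reconstructed from how this file uses them (an approximation, not the verbatim header), the intended relationships are roughly:

    /* Illustrative only -- approximate shape, not quoted from asm/kernel-pgtable.h */
    #if ARM64_SWAPPER_USES_SECTION_MAPS                /* e.g. 4K pages                   */
    #define SWAPPER_BLOCK_SIZE     SECTION_SIZE        /* granule of the early kernel map */
    #define SWAPPER_INIT_MAP_SIZE  PUD_SIZE            /* RAM mapped by swapper_pg_dir    */
    #else                                              /* e.g. 64K pages, PTE-level maps  */
    #define SWAPPER_BLOCK_SIZE     PAGE_SIZE
    #define SWAPPER_INIT_MAP_SIZE  PMD_SIZE
    #endif
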
@@ -370,21 +373,24 @@ static void __init map_mem(void)
                if (start >= end)
                        break;
 
-#ifndef CONFIG_ARM64_64K_PAGES
-               /*
-                * For the first memory bank align the start address and
-                * current memblock limit to prevent create_mapping() from
-                * allocating pte page tables from unmapped memory.
-                * When 64K pages are enabled, the pte page table for the
-                * first PGDIR_SIZE is already present in swapper_pg_dir.
-                */
-               if (start < limit)
-                       start = ALIGN(start, PMD_SIZE);
-               if (end < limit) {
-                       limit = end & PMD_MASK;
-                       memblock_set_current_limit(limit);
+               if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+                       /*
+                        * For the first memory bank align the start address and
+                        * current memblock limit to prevent create_mapping() from
+                        * allocating pte page tables from unmapped memory. With
+                        * the section maps, if the first block doesn't end on a section
+                        * size boundary, create_mapping() will try to allocate a pte
+                        * page, which may be returned from an unmapped area.
+                        * When section maps are not used, the pte page table for the
+                        * current limit is already present in swapper_pg_dir.
+                        */
+                       if (start < limit)
+                               start = ALIGN(start, SECTION_SIZE);
+                       if (end < limit) {
+                               limit = end & SECTION_MASK;
+                               memblock_set_current_limit(limit);
+                       }
                }
-#endif
                __map_memblock(start, end);
        }
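
As a worked example of the clamping above (hypothetical numbers): with PHYS_OFFSET = 0x80000000, a 2MB SECTION_SIZE and a first bank of [0x80000000, 0x80300000), both start and end fall below the initial limit, so:

    start = ALIGN(0x80000000UL, SECTION_SIZE);  /* already section-aligned: 0x80000000  */
    limit = 0x80300000UL & SECTION_MASK;        /* clamped down to 0x80200000           */
    memblock_set_current_limit(limit);          /* pte pages now come only from RAM the */
                                                /* swapper tables already map           */
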
 
@@ -392,22 +398,22 @@ static void __init map_mem(void)
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
-void __init fixup_executable(void)
+static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine grained */
-       if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+       if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
-                                                       SECTION_SIZE);
+                                                        SWAPPER_BLOCK_SIZE);
 
                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }
 
-       if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+       if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
-                                                       SECTION_SIZE);
+                                                         SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
@@ -420,7 +426,7 @@ void mark_rodata_ro(void)
 {
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
-                               PAGE_KERNEL_EXEC | PTE_RDONLY);
+                               PAGE_KERNEL_ROX);
 
 }
 #endif
@@ -450,26 +456,18 @@ void __init paging_init(void)
 
        empty_zero_page = virt_to_page(zero_page);
 
+       /* Ensure the zero page is visible to the page table walker */
+       dsb(ishst);
+
        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
-       flush_tlb_all();
+       local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 }
 
-/*
- * Enable the identity mapping to allow the MMU disabling.
- */
-void setup_mm_for_reboot(void)
-{
-       cpu_set_reserved_ttbr0();
-       flush_tlb_all();
-       cpu_set_idmap_tcr_t0sz();
-       cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
 /*
  * Check whether a kernel address is valid (derived from arch/x86/).
  */
@@ -508,12 +506,12 @@ int kern_addr_valid(unsigned long addr)
        return pfn_valid(pte_pfn(*pte));
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-#ifdef CONFIG_ARM64_64K_PAGES
+#if !ARM64_SWAPPER_USES_SECTION_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
        return vmemmap_populate_basepages(start, end, node);
 }
-#else  /* !CONFIG_ARM64_64K_PAGES */
+#else  /* !ARM64_SWAPPER_USES_SECTION_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
        unsigned long addr = start;
@@ -643,3 +641,59 @@ void __set_fixmap(enum fixed_addresses idx,
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
 }
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+       const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+       pgprot_t prot = PAGE_KERNEL_RO;
+       int size, offset;
+       void *dt_virt;
+
+       /*
+        * Check whether the physical FDT address is set and meets the minimum
+        * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+        * at least 8 bytes so that we can always access the size field of the
+        * FDT header after mapping the first chunk, double check here if that
+        * is indeed the case.
+        */
+       BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+       if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+               return NULL;
+
+       /*
+        * Make sure that the FDT region can be mapped without the need to
+        * allocate additional translation table pages, so that it is safe
+        * to call create_mapping() this early.
+        *
+        * On 64k pages, the FDT will be mapped using PTEs, so we need to
+        * be in the same PMD as the rest of the fixmap.
+        * On 4k pages, we'll use section mappings for the FDT so we only
+        * have to be in the same PUD.
+        */
+       BUILD_BUG_ON(dt_virt_base % SZ_2M);
+
+       BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
+                    __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
+
+       offset = dt_phys % SWAPPER_BLOCK_SIZE;
+       dt_virt = (void *)dt_virt_base + offset;
+
+       /* map the first chunk so we can read the size from the header */
+       create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+                      SWAPPER_BLOCK_SIZE, prot);
+
+       if (fdt_check_header(dt_virt) != 0)
+               return NULL;
+
+       size = fdt_totalsize(dt_virt);
+       if (size > MAX_FDT_SIZE)
+               return NULL;
+
+       if (offset + size > SWAPPER_BLOCK_SIZE)
+               create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+                              round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
+
+       memblock_reserve(dt_phys, size);
+
+       return dt_virt;
+}
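
For context, fixmap_remap_fdt() is intended to be called once from the early boot path, before the linear mapping of RAM exists, so the device tree can be read through the fixmap. A hedged usage sketch (the call site and error handling are illustrative, not copied from the arm64 setup code):

    static void __init setup_machine_fdt(phys_addr_t dt_phys)
    {
            void *dt_virt = fixmap_remap_fdt(dt_phys);

            if (!dt_virt || !early_init_dt_scan(dt_virt))
                    panic("Invalid device tree blob at physical address %pa", &dt_phys);
    }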