These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14. The diff below drops the x86-local zero-shadow population code from arch/x86/mm/kasan_init_64.c in favor of the generic kasan_populate_zero_shadow() helper.
diff --git a/kernel/arch/x86/mm/kasan_init_64.c b/kernel/arch/x86/mm/kasan_init_64.c
index 9a54dbe..d470cf2 100644
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
@@ -11,20 +12,6 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
-static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
-static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
-
-/*
- * This page is used as the early shadow. We don't use empty_zero_page
- * at early stages, because stack instrumentation could write some
- * garbage to this page.
- * Later we reuse it as the zero shadow for large ranges of memory
- * that are allowed to be accessed, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
-
 static int __init map_range(struct range *range)
 {
        unsigned long start;
@@ -61,106 +48,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
        }
 }
 
-static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
-                               unsigned long end)
-{
-       pte_t *pte = pte_offset_kernel(pmd, addr);
-
-       while (addr + PAGE_SIZE <= end) {
-               WARN_ON(!pte_none(*pte));
-               set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
-                                       | __PAGE_KERNEL_RO));
-               addr += PAGE_SIZE;
-               pte = pte_offset_kernel(pmd, addr);
-       }
-       return 0;
-}
-
-static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
-                               unsigned long end)
-{
-       int ret = 0;
-       pmd_t *pmd = pmd_offset(pud, addr);
-
-       while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
-               WARN_ON(!pmd_none(*pmd));
-               set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-                                       | _KERNPG_TABLE));
-               addr += PMD_SIZE;
-               pmd = pmd_offset(pud, addr);
-       }
-       if (addr < end) {
-               if (pmd_none(*pmd)) {
-                       void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-                       if (!p)
-                               return -ENOMEM;
-                       set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
-               }
-               ret = zero_pte_populate(pmd, addr, end);
-       }
-       return ret;
-}
-
-
-static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
-                               unsigned long end)
-{
-       int ret = 0;
-       pud_t *pud = pud_offset(pgd, addr);
-
-       while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
-               WARN_ON(!pud_none(*pud));
-               set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-                                       | _KERNPG_TABLE));
-               addr += PUD_SIZE;
-               pud = pud_offset(pgd, addr);
-       }
-
-       if (addr < end) {
-               if (pud_none(*pud)) {
-                       void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-                       if (!p)
-                               return -ENOMEM;
-                       set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
-               }
-               ret = zero_pmd_populate(pud, addr, end);
-       }
-       return ret;
-}
-
-static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
-{
-       int ret = 0;
-       pgd_t *pgd = pgd_offset_k(addr);
-
-       while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
-               WARN_ON(!pgd_none(*pgd));
-               set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-                                       | _KERNPG_TABLE));
-               addr += PGDIR_SIZE;
-               pgd = pgd_offset_k(addr);
-       }
-
-       if (addr < end) {
-               if (pgd_none(*pgd)) {
-                       void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-                       if (!p)
-                               return -ENOMEM;
-                       set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
-               }
-               ret = zero_pud_populate(pgd, addr, end);
-       }
-       return ret;
-}
-
-
-static void __init populate_zero_shadow(const void *start, const void *end)
-{
-       if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
-               panic("kasan: unable to map zero shadow!");
-}
-
-
 #ifdef CONFIG_KASAN_INLINE
 static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
@@ -212,7 +99,7 @@ void __init kasan_init(void)
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-       populate_zero_shadow((void *)KASAN_SHADOW_START,
+       kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
        for (i = 0; i < E820_X_MAX; i++) {
@@ -222,14 +109,15 @@ void __init kasan_init(void)
                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
-       populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
-                       kasan_mem_to_shadow((void *)__START_KERNEL_map));
+       kasan_populate_zero_shadow(
+               kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+               kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);
 
-       populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+       kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);
 
        memset(kasan_zero_page, 0, PAGE_SIZE);
@@ -237,4 +125,6 @@ void __init kasan_init(void)
        load_cr3(init_level4_pgt);
        __flush_tlb_all();
        init_task.kasan_depth = 0;
+
+       pr_info("KernelAddressSanitizer initialized\n");
 }
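
Each range handed to kasan_populate_zero_shadow() above is first translated into shadow space. For reference, kasan_mem_to_shadow() in this kernel generation (include/linux/kasan.h) scales an address down by eight, since one shadow byte tracks eight bytes of memory, and adds a fixed offset:

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* KASAN_SHADOW_SCALE_SHIFT is 3: one shadow byte per 8 bytes. */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

Because this mapping is many-to-one onto kasan_zero_page for the uninstrumented ranges (the hole above MAXMEM, the area past MODULES_END), their shadow reads always return zero, meaning "fully accessible"; the memset() before the final load_cr3() scrubs any garbage that early stack instrumentation wrote into that page.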