These changes are the raw update of the kernel sources to linux-4.4.6-rt14.
[kvmfornfv.git] kernel/arch/powerpc/kernel/vdso.c
index 305eb0d..b457bfa 100644
 /* The alignment of the vDSO */
 #define VDSO_ALIGNMENT (1 << 16)
 
-extern char vdso32_start, vdso32_end;
-static void *vdso32_kbase = &vdso32_start;
 static unsigned int vdso32_pages;
+static void *vdso32_kbase;
 static struct page **vdso32_pagelist;
 unsigned long vdso32_sigtramp;
 unsigned long vdso32_rt_sigtramp;
 
+#ifdef CONFIG_VDSO32
+extern char vdso32_start, vdso32_end;
+#endif
+
 #ifdef CONFIG_PPC64
 extern char vdso64_start, vdso64_end;
 static void *vdso64_kbase = &vdso64_start;
@@ -140,50 +143,6 @@ struct lib64_elfinfo
 };
 
 
-#ifdef __DEBUG
-static void dump_one_vdso_page(struct page *pg, struct page *upg)
-{
-       printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
-              page_count(pg),
-              pg->flags);
-       if (upg && !IS_ERR(upg) /* && pg != upg*/) {
-               printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg)
-                                                      << PAGE_SHIFT),
-                      page_count(upg),
-                      upg->flags);
-       }
-       printk("\n");
-}
-
-static void dump_vdso_pages(struct vm_area_struct * vma)
-{
-       int i;
-
-       if (!vma || is_32bit_task()) {
-               printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
-               for (i=0; i<vdso32_pages; i++) {
-                       struct page *pg = virt_to_page(vdso32_kbase +
-                                                      i*PAGE_SIZE);
-                       struct page *upg = (vma && vma->vm_mm) ?
-                               follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
-                               : NULL;
-                       dump_one_vdso_page(pg, upg);
-               }
-       }
-       if (!vma || !is_32bit_task()) {
-               printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
-               for (i=0; i<vdso64_pages; i++) {
-                       struct page *pg = virt_to_page(vdso64_kbase +
-                                                      i*PAGE_SIZE);
-                       struct page *upg = (vma && vma->vm_mm) ?
-                               follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
-                               : NULL;
-                       dump_one_vdso_page(pg, upg);
-               }
-       }
-}
-#endif /* DEBUG */
-
 /*
  * This is called from binfmt_elf, we create the special vma for the
  * vDSO and insert it into the mm struct tree
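
The body of that mapping path is not part of this hunk. As a rough sketch only (sketch_map_vdso and its locals are hypothetical; the real powerpc code also takes mmap_sem, chooses between the 32- and 64-bit images, and includes the data page in the length), the usual shape is to pick a free address range and back it with the preallocated page array via install_special_mapping():

/*
 * Rough sketch only -- not part of this patch.  Simplified shape of
 * mapping a vDSO page array into a new process's address space.
 */
#include <linux/mm.h>
#include <linux/err.h>

static int sketch_map_vdso(struct mm_struct *mm, struct page **pagelist,
			   unsigned long npages)
{
	unsigned long base, len = npages << PAGE_SHIFT;
	int rc;

	/* Find a free slot in the address space for the vDSO */
	base = get_unmapped_area(NULL, 0, len, 0, 0);
	if (IS_ERR_VALUE(base))
		return base;

	/* Create the special VMA backed by the preallocated kernel pages */
	rc = install_special_mapping(mm, base, len,
				     VM_READ | VM_EXEC |
				     VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				     pagelist);
	if (rc)
		return rc;

	mm->context.vdso_base = base;	/* remembered for signal trampolines */
	return 0;
}
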
@@ -292,6 +251,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 
 
 
+#ifdef CONFIG_VDSO32
 static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
                                  unsigned long *size)
 {
@@ -379,6 +339,20 @@ static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
 
        return 0;
 }
+#else /* !CONFIG_VDSO32 */
+static unsigned long __init find_function32(struct lib32_elfinfo *lib,
+                                           const char *symname)
+{
+       return 0;
+}
+
+static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
+                                      struct lib64_elfinfo *v64,
+                                      const char *orig, const char *fix)
+{
+       return 0;
+}
+#endif /* CONFIG_VDSO32 */
 
 
 #ifdef CONFIG_PPC64
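
When CONFIG_VDSO32 is disabled, the hunk above compiles find_function32() and vdso_do_func_patch32() down to stubs that return 0, so the shared fixup paths can keep calling them without an #ifdef at every call site. A standalone illustration of that pattern (FEATURE_FOO and foo_lookup() are made-up names; build once with and once without -DFEATURE_FOO):

#include <stdio.h>

#ifdef FEATURE_FOO
/* Real helper, only built when the feature is configured in */
static int foo_lookup(const char *name)
{
	printf("resolving %s\n", name);
	return 1;
}
#else
/* No-op stub with the same signature, so callers need no #ifdef */
static int foo_lookup(const char *name)
{
	(void)name;
	return 0;
}
#endif

int main(void)
{
	/* Common code calls the helper unconditionally either way */
	int found = foo_lookup("__kernel_gettimeofday");

	printf("found = %d\n", found);
	return 0;
}
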
@@ -489,6 +463,7 @@ static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
         * Locate symbol tables & text section
         */
 
+#ifdef CONFIG_VDSO32
        v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
        v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
        if (v32->dynsym == NULL || v32->dynstr == NULL) {
@@ -501,6 +476,7 @@ static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
                return -1;
        }
        v32->text = sect - vdso32_kbase;
+#endif
 
 #ifdef CONFIG_PPC64
        v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
@@ -537,7 +513,9 @@ static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
 static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
                                       struct lib64_elfinfo *v64)
 {
+#ifdef CONFIG_VDSO32
        Elf32_Sym *sym32;
+#endif
 #ifdef CONFIG_PPC64
        Elf64_Sym *sym64;
 
@@ -552,6 +530,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
                (sym64->st_value - VDSO64_LBASE);
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_VDSO32
        sym32 = find_symbol32(v32, "__kernel_datapage_offset");
        if (sym32 == NULL) {
                printk(KERN_ERR "vDSO32: Can't find symbol "
@@ -561,6 +540,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
        *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
                (vdso32_pages << PAGE_SHIFT) -
                (sym32->st_value - VDSO32_LBASE);
+#endif
 
        return 0;
 }
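
The value written at __kernel_datapage_offset is the distance from that word in the vDSO image to the data page, which is mapped immediately after the vDSO text: hence (vdso32_pages << PAGE_SHIFT) minus the symbol's offset from the link base. A standalone check of that arithmetic (page count, page shift and symbol value are made-up example numbers):

/* Standalone check of the datapage-offset arithmetic above. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* example 4K pages */
#define VDSO32_LBASE	0x0	/* link base of the 32-bit vDSO */

int main(void)
{
	unsigned long vdso32_pages = 2;		/* hypothetical text pages  */
	unsigned long st_value     = 0x1f80;	/* hypothetical symbol value */

	/* Data page sits right after the text pages, so the offset from
	 * the symbol's location to the data page is: */
	unsigned long off = (vdso32_pages << PAGE_SHIFT)
			    - (st_value - VDSO32_LBASE);

	printf("__kernel_datapage_offset = 0x%lx\n", off);	/* 0x80 */
	return 0;
}
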
@@ -569,55 +549,54 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
 static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
                                      struct lib64_elfinfo *v64)
 {
-       void *start32;
-       unsigned long size32;
+       unsigned long size;
+       void *start;
 
 #ifdef CONFIG_PPC64
-       void *start64;
-       unsigned long size64;
-
-       start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
-       if (start64)
+       start = find_section64(v64->hdr, "__ftr_fixup", &size);
+       if (start)
                do_feature_fixups(cur_cpu_spec->cpu_features,
-                                 start64, start64 + size64);
+                                 start, start + size);
 
-       start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64);
-       if (start64)
+       start = find_section64(v64->hdr, "__mmu_ftr_fixup", &size);
+       if (start)
                do_feature_fixups(cur_cpu_spec->mmu_features,
-                                 start64, start64 + size64);
+                                 start, start + size);
 
-       start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
-       if (start64)
+       start = find_section64(v64->hdr, "__fw_ftr_fixup", &size);
+       if (start)
                do_feature_fixups(powerpc_firmware_features,
-                                 start64, start64 + size64);
+                                 start, start + size);
 
-       start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64);
-       if (start64)
+       start = find_section64(v64->hdr, "__lwsync_fixup", &size);
+       if (start)
                do_lwsync_fixups(cur_cpu_spec->cpu_features,
-                                start64, start64 + size64);
+                                start, start + size);
 #endif /* CONFIG_PPC64 */
 
-       start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
-       if (start32)
+#ifdef CONFIG_VDSO32
+       start = find_section32(v32->hdr, "__ftr_fixup", &size);
+       if (start)
                do_feature_fixups(cur_cpu_spec->cpu_features,
-                                 start32, start32 + size32);
+                                 start, start + size);
 
-       start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32);
-       if (start32)
+       start = find_section32(v32->hdr, "__mmu_ftr_fixup", &size);
+       if (start)
                do_feature_fixups(cur_cpu_spec->mmu_features,
-                                 start32, start32 + size32);
+                                 start, start + size);
 
 #ifdef CONFIG_PPC64
-       start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
-       if (start32)
+       start = find_section32(v32->hdr, "__fw_ftr_fixup", &size);
+       if (start)
                do_feature_fixups(powerpc_firmware_features,
-                                 start32, start32 + size32);
+                                 start, start + size);
 #endif /* CONFIG_PPC64 */
 
-       start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32);
-       if (start32)
+       start = find_section32(v32->hdr, "__lwsync_fixup", &size);
+       if (start)
                do_lwsync_fixups(cur_cpu_spec->cpu_features,
-                                start32, start32 + size32);
+                                start, start + size);
+#endif
 
        return 0;
 }
@@ -779,11 +758,15 @@ static int __init vdso_init(void)
 #endif /* CONFIG_PPC64 */
 
 
+#ifdef CONFIG_VDSO32
+       vdso32_kbase = &vdso32_start;
+
        /*
         * Calculate the size of the 32 bits vDSO
         */
        vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
        DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages);
+#endif
 
 
        /*
@@ -804,6 +787,7 @@ static int __init vdso_init(void)
                return 0;
        }
 
+#ifdef CONFIG_VDSO32
        /* Make sure pages are in the correct state */
        vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2),
                                  GFP_KERNEL);
@@ -816,6 +800,7 @@ static int __init vdso_init(void)
        }
        vdso32_pagelist[i++] = virt_to_page(vdso_data);
        vdso32_pagelist[i] = NULL;
+#endif
 
 #ifdef CONFIG_PPC64
        vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2),
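
Both pagelists are sized npages + 2: after the text pages, one extra slot carries the shared vdso_data page and the last slot is the NULL terminator expected by the code that later walks the array. A userspace analogy of that layout (void * standing in for struct page *, values made up):

/* Userspace analogy of the "+ 2" pagelist sizing above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int npages = 2;	/* hypothetical vdso32_pages */
	void **pagelist = calloc(npages + 2, sizeof(void *));
	unsigned int i;

	if (!pagelist)
		return 1;

	/* Text pages first (fake addresses for illustration) */
	for (i = 0; i < npages; i++)
		pagelist[i] = (void *)(unsigned long)(0x1000 * (i + 1));

	pagelist[i++] = (void *)0xdada;	/* stands in for the vdso_data page */
	pagelist[i]   = NULL;		/* terminator for the array walker */

	printf("slots used: %u of %u\n", i + 1, npages + 2);
	free(pagelist);
	return 0;
}
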