These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / arch / arm / kernel / head.S
index 3637973..04286fd 100644 (file)
@@ -80,7 +80,7 @@
 ENTRY(stext)
  ARM_BE8(setend        be )                    @ ensure we are in BE8 mode
 
- THUMB(        adr     r9, BSYM(1f)    )       @ Kernel is always entered in ARM.
+ THUMB(        badr    r9, 1f          )       @ Kernel is always entered in ARM.
  THUMB(        bx      r9              )       @ If this is a Thumb-2 kernel,
  THUMB(        .thumb                  )       @ switch to Thumb now.
  THUMB(1:                      )
@@ -131,13 +131,30 @@ ENTRY(stext)
         * The following calls CPU specific code in a position independent
         * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
         * xxx_proc_info structure selected by __lookup_processor_type
-        * above.  On return, the CPU will be ready for the MMU to be
-        * turned on, and r0 will hold the CPU control register value.
+        * above.
+        *
+        * The processor init function will be called with:
+        *  r1 - machine type
+        *  r2 - boot data (atags/dt) pointer
+        *  r4 - translation table base (low word)
+        *  r5 - translation table base (high word, if LPAE)
+        *  r8 - translation table base 1 (pfn if LPAE)
+        *  r9 - cpuid
+        *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
+        *
+        * On return, the CPU will be ready for the MMU to be turned on,
+        * r0 will hold the CPU control register value, r1, r2, r4, and
+        * r9 will be preserved.  r5 will also be preserved if LPAE.
         */
        ldr     r13, =__mmap_switched           @ address to jump to after
                                                @ mmu has been enabled
-       adr     lr, BSYM(1f)                    @ return (PIC) address
+       badr    lr, 1f                          @ return (PIC) address
+#ifdef CONFIG_ARM_LPAE
+       mov     r5, #0                          @ high TTBR0
+       mov     r8, r4, lsr #12                 @ TTBR1 is swapper_pg_dir pfn
+#else
        mov     r8, r4                          @ set TTBR1 to swapper_pg_dir
+#endif
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10
        ret     r12
@@ -158,7 +175,7 @@ ENDPROC(stext)
  *
  * Returns:
  *  r0, r3, r5-r7 corrupted
- *  r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
+ *  r4 = physical page table address
  */
 __create_page_tables:
        pgtbl   r4, r8                          @ page table address
@@ -333,7 +350,6 @@ __create_page_tables:
 #endif
 #ifdef CONFIG_ARM_LPAE
        sub     r4, r4, #0x1000         @ point to the PGD table
-       mov     r4, r4, lsr #ARCH_PGD_SHIFT
 #endif
        ret     lr
 ENDPROC(__create_page_tables)
@@ -346,9 +362,9 @@ __turn_mmu_on_loc:
 
 #if defined(CONFIG_SMP)
        .text
-ENTRY(secondary_startup_arm)
        .arm
- THUMB(        adr     r9, BSYM(1f)    )       @ Kernel is entered in ARM.
+ENTRY(secondary_startup_arm)
+ THUMB(        badr    r9, 1f          )       @ Kernel is entered in ARM.
  THUMB(        bx      r9              )       @ If this is a Thumb-2 kernel,
  THUMB(        .thumb                  )       @ switch to Thumb now.
  THUMB(1:                      )
@@ -381,10 +397,13 @@ ENTRY(secondary_startup)
        adr     r4, __secondary_data
        ldmia   r4, {r5, r7, r12}               @ address to jump to after
        sub     lr, r4, r5                      @ mmu has been enabled
-       ldr     r4, [r7, lr]                    @ get secondary_data.pgdir
-       add     r7, r7, #4
-       ldr     r8, [r7, lr]                    @ get secondary_data.swapper_pg_dir
-       adr     lr, BSYM(__enable_mmu)          @ return address
+       add     r3, r7, lr
+       ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
+ARM_BE8(eor    r4, r4, r5)                     @ Swap r5 and r4 in BE:
+ARM_BE8(eor    r5, r4, r5)                     @ it can be done in 3 steps
+ARM_BE8(eor    r4, r4, r5)                     @ without using a temp reg.
+       ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
+       badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10                   @ initialise processor
@@ -397,7 +416,7 @@ ENDPROC(secondary_startup_arm)
         * r6  = &secondary_data
         */
 ENTRY(__secondary_switched)
-       ldr     sp, [r7, #4]                    @ get secondary_data.stack
+       ldr     sp, [r7, #12]                   @ get secondary_data.stack
        mov     fp, #0
        b       secondary_start_kernel
 ENDPROC(__secondary_switched)
@@ -416,12 +435,14 @@ __secondary_data:
 /*
  * Setup common bits before finally enabling the MMU.  Essentially
  * this is just loading the page table pointer and domain access
- * registers.
+ * registers.  All these registers need to be preserved by the
+ * processor setup function (or set in the case of r0)
  *
  *  r0  = cp#15 control register
  *  r1  = machine ID
  *  r2  = atags or dtb pointer
- *  r4  = page table (see ARCH_PGD_SHIFT in asm/memory.h)
+ *  r4  = TTBR pointer (low word)
+ *  r5  = TTBR pointer (high word if LPAE)
  *  r9  = processor ID
  *  r13 = *virtual* address to jump to upon completion
  */
@@ -440,11 +461,10 @@ __enable_mmu:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
 #endif
-#ifndef CONFIG_ARM_LPAE
-       mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+#ifdef CONFIG_ARM_LPAE
+       mcrr    p15, 0, r4, r5, c2              @ load TTBR0
+#else
+       mov     r5, #DACR_INIT
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
 #endif