These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/arch/arm64/kernel/entry.S b/kernel/arch/arm64/kernel/entry.S
index 6515be3..dd8fd31 100644
--- a/kernel/arch/arm64/kernel/entry.S
+++ b/kernel/arch/arm64/kernel/entry.S
@@ -21,7 +21,7 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/cpufeature.h>
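
The only change in this first hunk is a header rename: the assembly-side alternative macros were folded into <asm/alternative.h>, so <asm/alternative-asm.h> no longer exists in this tree. The framework itself works as before: a boot-time pass walks a table of (original range, replacement, feature) entries and patches matching ranges in place. A minimal C sketch of that idea, assuming a hypothetical cpu_has() query rather than the kernel's real data structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct alt_entry {
            uint32_t *orig;         /* start of original instruction range  */
            const uint32_t *alt;    /* same-length replacement instructions */
            unsigned int len;       /* range length in bytes                */
            int feature;            /* patch only if this feature is set    */
    };

    static bool cpu_has(int feature) { return feature != 0; }   /* stub */

    static void apply_alternatives(struct alt_entry *tbl, unsigned int n)
    {
            for (unsigned int i = 0; i < n; i++)
                    if (cpu_has(tbl[i].feature))
                            memcpy(tbl[i].orig, tbl[i].alt, tbl[i].len);
            /* the real kernel also synchronises the I-cache afterwards */
    }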
        */
        .endm
 
-       .macro  kernel_exit, el, ret = 0
+       .macro  kernel_exit, el
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        msr     sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-       alternative_insn                                                \
-       "nop",                                                          \
-       "tbz x22, #4, 1f",                                              \
-       ARM64_WORKAROUND_845719
+alternative_if_not ARM64_WORKAROUND_845719
+       nop
+       nop
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+       nop
+#endif
+alternative_else
+       tbz     x22, #4, 1f
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-       alternative_insn                                                \
-       "nop; nop",                                                     \
-       "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:",         \
-       ARM64_WORKAROUND_845719
+       mrs     x29, contextidr_el1
+       msr     contextidr_el1, x29
 #else
-       alternative_insn                                                \
-       "nop",                                                          \
-       "msr contextidr_el1, xzr; 1:",                                  \
-       ARM64_WORKAROUND_845719
+       msr contextidr_el1, xzr
 #endif
+1:
+alternative_endif
 #endif
        .endif
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
-       .if     \ret
-       ldr     x1, [sp, #S_X1]                 // preserve x0 (syscall return)
-       .else
        ldp     x0, x1, [sp, #16 * 0]
-       .endif
        ldp     x2, x3, [sp, #16 * 1]
        ldp     x4, x5, [sp, #16 * 2]
        ldp     x6, x7, [sp, #16 * 3]
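
Two things happen in the kernel_exit hunk. First, the erratum 845719 workaround switches from the string-form alternative_insn to the block-form alternative_if_not / alternative_else / alternative_endif macros. Both variants of a patched range must occupy the same number of bytes, which is why the default path is padded with nops: two nops (three with CONFIG_PID_IN_CONTEXTIDR) mirror the tbz plus one or two contextidr accesses on the workaround side, the 1: label itself taking no space. Second, the ret parameter is gone: rather than conditionally skipping the x0 reload to preserve a syscall return value, the caller now stores x0 into the saved frame (see the ret_fast_syscall hunk below) and kernel_exit unconditionally restores x0/x1 from pt_regs. A toy C restatement of the size invariant, with instruction counts taken from the hunk above:

    #include <assert.h>

    int main(void)
    {
            unsigned insn = 4;              /* bytes per A64 instruction */
    #ifdef CONFIG_PID_IN_CONTEXTIDR
            unsigned dflt = 3 * insn;       /* nop; nop; nop             */
            unsigned fix  = 3 * insn;       /* tbz; mrs; msr             */
    #else
            unsigned dflt = 2 * insn;       /* nop; nop                  */
            unsigned fix  = 2 * insn;       /* tbz; msr                  */
    #endif
            assert(dflt == fix);            /* the kernel's macros reject a
                                               mismatch at assembly time  */
            return 0;
    }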
@@ -349,8 +345,8 @@ el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        enable_dbg
        mov     x0, sp
+       mov     x2, x1
        mov     x1, #BAD_SYNC
-       mrs     x2, esr_el1
        b       bad_mode
 ENDPROC(el1_sync)
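
el1_inv used to re-read ESR_EL1 immediately before calling bad_mode. By that point the register may no longer describe the original exception: once enable_dbg runs, any newly taken exception at EL1 overwrites ESR_EL1. The fix hands bad_mode the syndrome that was read once at exception entry and is still live in x1, moving it into x2 before x1 is reused for the reason code. The same discipline in C, with stand-in helpers (read_esr_el1() and this bad_mode() are illustrative, not the kernel's exact signatures):

    static unsigned long read_esr_el1(void) { return 0; }   /* stub accessor */
    static void bad_mode(void *regs, int reason, unsigned long esr)
    { (void)regs; (void)reason; (void)esr; }

    static void el1_sync_sketch(void *regs)
    {
            unsigned long esr = read_esr_el1();     /* read exactly once, at entry */
            /* ... dispatch on the exception class; further exceptions may
             * be taken along the way, clobbering the live ESR_EL1 ... */
            bad_mode(regs, 0 /* BAD_SYNC */, esr);  /* pass the captured value */
    }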
 
@@ -440,6 +436,8 @@ el0_sync_compat:
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC32       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
+       cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
+       b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_32        // CP15 MRC/MCR trap
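
This hunk lets 32-bit (compat) tasks hit the same PC alignment handling that native 64-bit tasks already get: an ESR_ELx_EC_PC_ALIGN syndrome is now routed to el0_sp_pc instead of falling through toward the unknown/undefined path. The cmp/b.eq chain is an exception-class switch spelled in assembly; the new case reads roughly as below, with the constants and handlers stubbed for illustration:

    enum { EC_FP_EXC32, EC_PC_ALIGN, EC_UNKNOWN };   /* stand-ins */

    static void el0_fpsimd_exc(void) {}
    static void el0_sp_pc(void)      {}
    static void el0_undef(void)      {}

    static void el0_sync_compat_sketch(unsigned int ec)
    {
            switch (ec) {
            case EC_FP_EXC32:  el0_fpsimd_exc();  break;
            case EC_PC_ALIGN:  el0_sp_pc();       break;  /* new in this hunk */
            case EC_UNKNOWN:   el0_undef();       break;
            default:           /* falls to el0_inv */     break;
            }
    }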
@@ -556,7 +554,7 @@ el0_inv:
        ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
-       mrs     x2, esr_el1
+       mov     x2, x25
        bl      bad_mode
        b       ret_to_user
 ENDPROC(el0_sync)
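
el0_inv gets the same treatment as el1_inv above. On the EL0 path the syndrome was loaded into x25 at the top of el0_sync, so instead of re-reading ESR_EL1 after ct_user_exit and any intervening exceptions, bad_mode is handed that saved copy; the el1_sync_sketch above applies unchanged.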
@@ -588,7 +586,8 @@ ENDPROC(el0_irq)
  *
  */
 ENTRY(cpu_switch_to)
-       add     x8, x0, #THREAD_CPU_CONTEXT
+       mov     x10, #THREAD_CPU_CONTEXT
+       add     x8, x0, x10
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
@@ -597,7 +596,7 @@ ENTRY(cpu_switch_to)
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
-       add     x8, x1, #THREAD_CPU_CONTEXT
+       add     x8, x1, x10
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
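
Both cpu_switch_to hunks are one change: the cpu_context offset is materialised into x10 with mov and applied as a register-register add, with x10 reused for the second task. The likely motivation (an assumption here, not stated in the patch) is encoding range: A64 ADD (immediate) only accepts a 12-bit immediate, optionally shifted left by 12, so add x8, x0, #THREAD_CPU_CONTEXT stops assembling once the offset no longer fits that pattern, as it can when task_struct grows under debug or RT configurations, while mov expands to a MOVZ/MOVK sequence with no such limit. A small C checker for the encoding rule, as an illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* True if off fits A64 ADD (immediate): a 12-bit unsigned
     * immediate, optionally shifted left by 12 bits. */
    static bool fits_add_imm(unsigned long off)
    {
            return off <= 0xfff ||
                   ((off & 0xfff) == 0 && (off >> 12) <= 0xfff);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   fits_add_imm(0x7d0),     /* 1: small offset          */
                   fits_add_imm(0x1000),    /* 1: 4KiB, uses the shift  */
                   fits_add_imm(0x1008));   /* 0: needs mov+add instead */
            return 0;
    }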
@@ -615,17 +614,21 @@ ENDPROC(cpu_switch_to)
  */
 ret_fast_syscall:
        disable_irq                             // disable interrupts
-       ldr     x1, [tsk, #TI_FLAGS]
+       str     x0, [sp, #S_X0]                 // returned x0
+       ldr     x1, [tsk, #TI_FLAGS]            // re-check for syscall tracing
+       and     x2, x1, #_TIF_SYSCALL_WORK
+       cbnz    x2, ret_fast_syscall_trace
        and     x2, x1, #_TIF_WORK_MASK
-       cbnz    x2, fast_work_pending
+       cbnz    x2, work_pending
        enable_step_tsk x1, x2
-       kernel_exit 0, ret = 1
+       kernel_exit 0
+ret_fast_syscall_trace:
+       enable_irq                              // enable interrupts
+       b       __sys_trace_return_skipped      // we already saved x0
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-fast_work_pending:
-       str     x0, [sp, #S_X0]                 // returned x0
 work_pending:
        tbnz    x1, #TIF_NEED_RESCHED, work_resched
        tbnz    x1, #TIF_NEED_RESCHED_LAZY, work_resched
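
The syscall fast path changes in two related ways, and together they are what allowed kernel_exit to lose its ret parameter. First, the x0 return value is stored into the saved frame unconditionally, before any work flags are inspected, so signal delivery, rescheduling and the final register reload all see a consistent pt_regs. Second, _TIF_SYSCALL_WORK is re-checked with interrupts disabled: a tracer may attach between syscall entry and exit, and the new ret_fast_syscall_trace stub re-enables interrupts and joins the trace-exit path at __sys_trace_return_skipped, whose name records that x0 has already been saved. The old fast_work_pending shim, which existed only to store x0 late, is dropped. Restated as a C sketch in which only the check ordering and the unconditional save mirror the assembly (helpers and task layout are stand-ins):

    struct task_sketch {
            unsigned long ti_flags;
            unsigned long saved_x0;          /* the pt_regs S_X0 slot */
    };

    #define _TIF_SYSCALL_WORK 0x01           /* illustrative values */
    #define _TIF_WORK_MASK    0x0e

    static void local_irq_disable(void) {}   /* stubs */
    static void local_irq_enable(void)  {}
    static void sys_trace_return(void)  {}
    static void work_pending(void)      {}
    static void kernel_exit_el0(void)   {}   /* reloads x0 from the frame */

    static void ret_fast_syscall_sketch(struct task_sketch *t, long x0)
    {
            local_irq_disable();
            t->saved_x0 = (unsigned long)x0;        /* always save x0 first  */
            if (t->ti_flags & _TIF_SYSCALL_WORK) {  /* tracer attached since
                                                       syscall entry?        */
                    local_irq_enable();
                    sys_trace_return();             /* x0 already saved      */
                    return;
            }
            if (t->ti_flags & _TIF_WORK_MASK) {
                    work_pending();                 /* signals, resched, ... */
                    return;
            }
            kernel_exit_el0();
    }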
@@ -650,7 +653,7 @@ ret_to_user:
        cbnz    x2, work_pending
        enable_step_tsk x1, x2
 no_work_pending:
-       kernel_exit 0, ret = 0
+       kernel_exit 0
 ENDPROC(ret_to_user)
 
 /*