 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values...  Be aware!
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
#else
	arch_irq_handler_default
#endif
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
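/*
 * A minimal sketch of a handler honouring that contract (illustrative
 * only; the real implementations live in the per-CPU proc-*.S and
 * abort-*.S files):
 *
 *	my_early_abort:
 *		mrc	p15, 0, r1, c5, c0, 0	@ read DFSR into r1
 *		mrc	p15, 0, r0, c6, c0, 0	@ read DFAR into r0
 *		mov	pc, lr			@ r9 left untouched
 */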
	.section .kprobes.text, "ax", %progbits
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm
__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

ENDPROC(__dabt_invalid)

ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR
	@ XXX fall through to common_invalid

@
@ common_invalid - generic code for failed exception
@		   (re-entrant version of handlers)
@
common_invalid:
	add	r0, sp, #S_PC			@ here for interlock avoidance
	mov	r7, #-1				@  ""   ""    ""        ""
	str	r4, [sp]			@ save preserved r0
	stmia	r0, {r5 - r7}			@ lr_<exception>,
						@ cpsr_<exception>, "old_r0"

ENDPROC(__und_invalid)
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)

	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	teq	r8, #0				@ if preempt count != 0
	bne	1f				@ return from exception
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED		@ if NEED_RESCHED is set
	blne	svc_preempt			@ preempt!

	ldr	r8, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
	teq	r8, #0				@ if preempt lazy count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED_LAZY

	svc_exit r5, irq = 1			@ return from exception
#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED

	tst	r0, #_TIF_NEED_RESCHED_LAZY

	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
	teq	r0, #0				@ if preempt lazy count != 0
	@
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM
	@ the PC will be pointing at the next instruction, and we have
	@ to subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	@
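	@ A C-like sketch of that correction (illustrative only):
	@
	@	regs->ARM_pc -= (regs->ARM_cpsr & PSR_T_BIT) ? 2 : 4;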
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	ldrh	r9, [r4]			@ bottom 16 bits
	orr	r0, r9, r0, lsl #16
#endif
	badr	r9, __und_svc_finish

	mov	r1, #4				@ PC correction to apply
	mov	r0, sp				@ struct pt_regs *regs

	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception

	svc_exit r5				@ return from exception

	mov	r0, sp				@ struct pt_regs *regs
/*
 * Abort mode handlers
 */

@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros.  However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	r1, lr				@ Save lr_abt
	mrs	r2, spsr			@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )

	add	r0, sp, #8			@ struct pt_regs *regs

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	lr, r1				@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2			@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE must be
 * a multiple of 8 as well.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr	r8, .LCcralign)

	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)

	@ Clear FP to mark the first stack frame
	zero_fp

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endm
	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	b	ret_to_user_from_irq
	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	b	call_fpe

__und_usr_thumb:
	sub	r4, r2, #2			@ First half of Thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16-bit undefined instruction

/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32-bit instruction if xx != 0
	blo	__und_usr_fault_16_pan		@ 16-bit undefined instruction
3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction

	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32-bit undefined instruction function
#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table, "a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
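/*
 * A hypothetical emulator following that convention might look like this
 * (sketch only; the label and the opcode test are made up):
 *
 *	my_emulator:
 *		@ ... decide whether r0 holds an opcode we can emulate ...
 *		mov	pc, r9		@ emulated: resume at regs->ARM_pc
 *		mov	pc, lr		@ not ours: real undefined instruction
 */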
	@ Fall-through from Thumb-2 __und_usr

	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes

	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	cmp	r8, r7				@ NEON instruction?
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)

	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)

	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)

	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)

	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)
#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif
.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
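/*
 * Matching sketch in C (illustrative): an opcode insn matches the table
 * when, for some (mask, opcode) pair, (insn & mask) == opcode; a zero
 * mask terminates the table.
 *
 *	for (p = table; p->mask != 0; p++)
 *		if ((insn & p->mask) == p->opcode)
 *			return NEON_INSN;
 */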
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
ENDPROC(ret_from_exception)
	mov	r0, sp				@ struct pt_regs *regs

	restore_user_regs fast = 0, offset = 0
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
ENDPROC(__switch_to)
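/*
 * In C terms the contract is roughly (sketch; this matches the prototype
 * declared in asm/switch_to.h):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct thread_info *prev_ti,
 *					struct thread_info *next_ti);
 */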
/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
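/*
 * Calling a helper from userspace is just a call to its fixed address.
 * A minimal sketch in C, following the convention documented there (the
 * typedef/macro names are illustrative, not from an exported header):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 */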
#ifdef CONFIG_ARM_THUMB

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
#ifdef CONFIG_KUSER_HELPERS

	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */
__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr
#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
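/*
 * Userspace calling sketch for this helper, following
 * Documentation/arm/kernel_user_helpers.txt (the typedef/macro names are
 * illustrative, not from an exported header):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * A zero return value means *ptr was atomically changed from *oldval to
 * *newval; non-zero means the caller should retry.
 */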
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32
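/*
 * Userspace sketch (illustrative names): a full memory barrier with no
 * arguments and no return value.
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 *
 *	__kuser_dmb();
 */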
__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
#endif
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)
#endif

	kuser_pad __kuser_cmpxchg, 32
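/*
 * Userspace calling sketch for __kuser_cmpxchg, following
 * Documentation/arm/kernel_user_helpers.txt (typedef/macro/function names
 * are illustrative):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *		return new;
 *	}
 *
 * A zero return means *ptr was updated; non-zero means try again.
 */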
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]		@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3		@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0				@ 0xffff0ff0 software TLS value, then
	.endr					@ pad up to __kuser_helper_version
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif
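/*
 * Userspace can probe for the helpers by reading this version word at
 * 0xffff0ffc before using them; each helper documents the minimum version
 * it requires.  Sketch (illustrative macro name):
 *
 *	#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
 *
 *	if (__kuser_helper_version < 5)
 *		... fall back, e.g. __kuser_cmpxchg64 is not present ...
 */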
/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0

	sub	lr, lr, #\correction

	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@ Prepare for SVC32 mode.  IRQs remain disabled.
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@ the branch table must immediately follow this code
	and	lr, lr, #0x0f
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)
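	@ The low four bits of the parent SPSR select one of the sixteen
	@ handler words that follow; a C-like sketch of the dispatch
	@ (illustrative only):
	@
	@	handler = table[parent_spsr & 0x0f];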
	@ handler addresses follow this label
1:
	.endm
	.section .stubs, "ax", %progbits
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn
/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like an NMI on
 * x86 systems.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq
	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq