 * Copyright (c) 2003-2008 Fabrice Bellard
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.

#include "qemu/osdep.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu/path.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"

static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;

#define EXCP_DUMP(env, fmt, ...) \
    CPUState *cs = ENV_GET_CPU(env); \
    fprintf(stderr, fmt, ## __VA_ARGS__); \
    cpu_dump_state(cs, stderr, fprintf, 0); \
    if (qemu_log_separate()) { \
        qemu_log(fmt, ## __VA_ARGS__); \
        log_cpu_state(cs, 0); \

#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
unsigned long reserved_va = 0xf7000000;
unsigned long reserved_va;
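
/*
 * Illustrative sketch only (a hypothetical helper, not the reservation code
 * QEMU actually uses here): the comment above describes reserving the whole
 * guest address space up front.  On Linux that is typically done with an
 * inaccessible, unbacked anonymous mapping which later guest mappings
 * overwrite via MAP_FIXED.  Assumes <sys/mman.h> is available (normally
 * pulled in through qemu/osdep.h).
 */
static void * __attribute__((unused)) reserve_guest_va_sketch(unsigned long size)
{
    void *p = mmap(NULL, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}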
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;

void gemu_log(const char *fmt, ...)
    vfprintf(stderr, fmt, ap);

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)

/***********************************************************/
/* Helper routines for implementing atomic operations. */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work. */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
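
/*
 * Note on pending_cpus (derived from the code below): it is 0 while no
 * exclusive operation is in flight.  The CPU requesting exclusivity sets it
 * to 1 and then bumps it once for every other vCPU that is still running;
 * as those vCPUs leave guest code in cpu_exec_end() the count drops back
 * towards 1, at which point the requester is woken via exclusive_cond.
 */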
/* Make sure everything is in a consistent state for calling fork(). */
void fork_start(void)
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);

void fork_end(int child)
    mmap_fork_end(child);
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads. */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);

/* Wait for pending exclusive operations to complete.  The exclusive lock
static inline void exclusive_idle(void)
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec. */
static inline void start_exclusive(void)
    pthread_mutex_lock(&exclusive_lock);
    /* Make all other cpus stop executing. */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);

/* Finish an exclusive operation. */
static inline void __attribute__((unused)) end_exclusive(void)
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);

/* Wait for exclusive ops to finish, and begin cpu execution. */
static inline void cpu_exec_start(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    pthread_mutex_unlock(&exclusive_lock);

/* Mark cpu as not executing, and release pending exclusive ops. */
static inline void cpu_exec_end(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
    pthread_mutex_unlock(&exclusive_lock);

void cpu_list_lock(void)
    pthread_mutex_lock(&cpu_list_mutex);

void cpu_list_unlock(void)
    pthread_mutex_unlock(&cpu_list_mutex);
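
/*
 * Usage sketch (hypothetical caller, for illustration only -- not a function
 * the rest of this file calls): an emulated atomic guest operation brackets
 * its host-side update with the exclusive machinery above, so that no other
 * vCPU is executing guest code while the update happens.
 */
static void __attribute__((unused)) emulate_atomic_store_sketch(uint32_t *host_addr,
                                                                uint32_t val)
{
    start_exclusive();    /* park every other vCPU and wait for them to stop */
    *host_addr = val;     /* perform the update while we are alone */
    end_exclusive();      /* wake the other vCPUs and let them resume */
}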
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
    return cpu_get_host_ticks();

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);

static uint64_t *idt_table;

static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);

static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate(idt_table + n, 0, dpl, 0, 0);

void cpu_loop(CPUX86State *env)
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_x86_exec(cs);
            /* linux syscall from int $0x80 */
            env->regs[R_EAX] = do_syscall(env,
            /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            /* division by zero */
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            pc = env->segs[R_CS].base + env->eip;
            EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
        process_pending_signals(env);
#define get_user_code_u32(x, gaddr, env) \
    ({ abi_long __r = get_user_u32((x), (gaddr)); \
       if (!__r && bswap_code(arm_sctlr_b(env))) { \

#define get_user_code_u16(x, gaddr, env) \
    ({ abi_long __r = get_user_u16((x), (gaddr)); \
       if (!__r && bswap_code(arm_sctlr_b(env))) { \

#define get_user_data_u32(x, gaddr, env) \
    ({ abi_long __r = get_user_u32((x), (gaddr)); \
       if (!__r && arm_cpu_bswap_data(env)) { \

#define get_user_data_u16(x, gaddr, env) \
    ({ abi_long __r = get_user_u16((x), (gaddr)); \
       if (!__r && arm_cpu_bswap_data(env)) { \

#define put_user_data_u32(x, gaddr, env) \
    ({ typeof(x) __x = (x); \
       if (arm_cpu_bswap_data(env)) { \
           __x = bswap32(__x); \
       put_user_u32(__x, (gaddr)); \

#define put_user_data_u16(x, gaddr, env) \
    ({ typeof(x) __x = (x); \
       if (arm_cpu_bswap_data(env)) { \
           __x = bswap16(__x); \
       put_user_u16(__x, (gaddr)); \
/* Commpage handling -- there is no commpage for AArch64 */

 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
    uint64_t oldval, newval, val;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying. */

    cpsr = cpsr_read(env);
    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
    if (put_user_u64(val, addr)) {
        env->exception.vaddress = addr;
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);

    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);

/* Handle a jump to the kernel code page. */
do_kernel_trap(CPUARMState *env)
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP. */
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying. */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails. */
        if (get_user_u32(val, addr))
        if (val == env->regs[0]) {
            /* FIXME: Check for segfaults. */
            put_user_u32(val, addr);
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);

    /* Jump back to the caller. */
    addr = env->regs[14];
    env->regs[15] = addr;
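
/*
 * For reference, the guest-side view of the helpers emulated above, roughly
 * as described in the kernel_user_helpers document cited earlier (illustration
 * only, not code used by QEMU): a 32-bit guest calls a helper through its
 * fixed commpage address, e.g.
 *
 *     typedef int (kernel_cmpxchg_fn)(int oldval, int newval, volatile int *ptr);
 *     #define __kernel_cmpxchg (*(kernel_cmpxchg_fn *)0xffff0fc0)
 *
 * and treats a zero return (with the C flag set) as a successful exchange.
 * The typedef name here is hypothetical.
 */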
/* Store exclusive handling for AArch32 */
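/*
 * Layout note (derived from the decode in do_strex() below): bits [3:0] of
 * env->exclusive_info hold the access size, bits [7:4] the register that
 * receives the status result, bits [11:8] the register holding the value to
 * store, and bits [15:12] the register holding the high word of a 64-bit
 * store-exclusive pair.
 */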
static int do_strex(CPUARMState *env)
    if (env->exclusive_addr != env->exclusive_test) {
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
        segv = get_user_u8(val, addr);
        segv = get_user_data_u16(val, addr, env);
        segv = get_user_data_u32(val, addr, env);
        env->exception.vaddress = addr;
        segv = get_user_data_u32(valhi, addr + 4, env);
            env->exception.vaddress = addr + 4;
        if (arm_cpu_bswap_data(env)) {
            val = deposit64((uint64_t)valhi, 32, 32, val);
            val = deposit64(val, 32, 32, valhi);
    if (val != env->exclusive_val) {
    val = env->regs[(env->exclusive_info >> 8) & 0xf];
        segv = put_user_u8(val, addr);
        segv = put_user_data_u16(val, addr, env);
        segv = put_user_data_u32(val, addr, env);
        env->exception.vaddress = addr;
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_data_u32(val, addr + 4, env);
            env->exception.vaddress = addr + 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
void cpu_loop(CPUARMState *env)
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = cpu_arm_exec(cs);
                TaskState *ts = cs->opaque;

                /* we handle the FPU emulation here, as Linux does */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, &info);
                } else if (rc < 0) { /* FP exception */
                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                    if (-rc & float_flag_divbyzero)
                    if (-rc & float_flag_overflow)
                    if (-rc & float_flag_underflow)
                    if (-rc & float_flag_inexact)

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, &info);

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
                } else { /* everything OK */

                if (trapnr == EXCP_BKPT) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15], env);
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15], env);
                        n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15] - 2, env);
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15] - 4, env);

                if (n == ARM_NR_cacheflush) {
                } else if (n == ARM_NR_semihosting
                           || n == ARM_NR_thumb_semihosting) {
                    env->regs[0] = do_arm_semihosting(env);
                } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                    if (env->thumb || n == 0) {
                        n -= ARM_SYSCALL_BASE;
                    if (n > ARM_NR_BASE) {
                        case ARM_NR_cacheflush:
                            cpu_set_tls(env, env->regs[0]);
                        case ARM_NR_breakpoint:
                            env->regs[15] -= env->thumb ? 2 : 4;
                            gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                            env->regs[0] = -TARGET_ENOSYS;
                        env->regs[0] = do_syscall(env,
            /* just indicate that signals should be handled asap */
            if (!do_strex(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
            /* nothing to do here for user-mode, just resume guest code */
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
 * Handle AArch64 store-release exclusive
 * rs = the register that receives the status result
 * rt = the register that is stored
 * rt2 = the second register stored (for STP)
static int do_strex_a64(CPUARMState *env)
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);

    addr = env->exclusive_addr;
    if (addr != env->exclusive_test) {
        segv = get_user_u8(val, addr);
        segv = get_user_u16(val, addr);
        segv = get_user_u32(val, addr);
        segv = get_user_u64(val, addr);
        env->exception.vaddress = addr;
    if (val != env->exclusive_val) {
            segv = get_user_u32(val, addr + 4);
            segv = get_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
        if (val != env->exclusive_high) {
    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];
        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        segv = put_user_u64(val, addr);
        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];
            segv = put_user_u32(val, addr + 4);
            segv = put_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
        env->xregs[rs] = rc;
    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;

/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_arm_exec(cs);
            env->xregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            if (!do_strex_a64(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->exception.vaddress;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            env->xregs[0] = do_arm_semihosting(env);
            /* nothing to do here for user-mode, just resume guest code */
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
        env->exclusive_addr = -1;

#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = uc32_cpu_exec(cs);
        case UC32_EXCP_PRIV:
            get_user_u32(insn, env->regs[31] - 4);
            n = insn & 0xffffff;
            if (n >= UC32_SYSCALL_BASE) {
                n -= UC32_SYSCALL_BASE;
                if (n == UC32_SYSCALL_NR_set_tls) {
                    cpu_set_tls(env, env->regs[0]);
                    env->regs[0] = do_syscall(env,
        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
        process_pending_signals(env);

        EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;

/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);

static void save_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));

static void restore_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    unsigned int i, cwp1;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)

static void flush_windows(CPUSPARCState *env)
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
        if (env->canrestore == 0)
        save_window_offset(env, cwp1);
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
void cpu_loop(CPUSPARCState *env)
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_sparc_exec(cs);

        /* Compute PSR before exposing state. */
        if (env->cc_op != CC_OP_FLAGS) {

#ifndef TARGET_SPARC64
            ret = do_syscall(env, env->gregs[1],
                             env->regwptr[0], env->regwptr[1],
                             env->regwptr[2], env->regwptr[3],
                             env->regwptr[4], env->regwptr[5],
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
                env->psr |= PSR_CARRY;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
                env->psr &= ~PSR_CARRY;
            env->regwptr[0] = ret;
            /* next instruction */
            env->npc = env->npc + 4;
        case 0x83: /* flush windows */
            /* next instruction */
            env->npc = env->npc + 4;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmuregs[4];
            queue_signal(env, info.si_signo, &info);
        case TT_SPILL: /* window overflow */
        case TT_FILL: /* window underflow */
            restore_window(env);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            if (trapnr == TT_DFAULT)
                info._sifields._sigfault._addr = env->dmmuregs[4];
                info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_ABI32
            sparc64_get_context(env);
            sparc64_set_context(env);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
    return cpu_get_host_ticks();

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
__attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;

/* XXX: to be fixed */
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)

int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val)

static int do_store_exclusive(CPUPPCState *env)
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
        if (addr == env->reserve_addr) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
                segv = get_user_u64(val, addr);
                    segv = get_user_u64(val2, addr + 8);
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                    case 1: segv = put_user_u8(val, addr); break;
                    case 2: segv = put_user_u16(val, addr); break;
                    case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                    case 8: segv = put_user_u64(val, addr); break;
                        if (val2 == env->reserve_val2) {
                                val = env->gpr[reg+1];
                                val2 = env->gpr[reg+1];
                                segv = put_user_u64(val, addr);
                                    segv = put_user_u64(val2, addr + 8);
    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
void cpu_loop(CPUPPCState *env)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_ppc_exec(cs);
        case POWERPC_EXCP_NONE:
        case POWERPC_EXCP_CRITICAL: /* Critical input */
            cpu_abort(cs, "Critical interrupt while in user mode. "
        case POWERPC_EXCP_MCHECK: /* Machine check exception */
            cpu_abort(cs, "Machine check exception while in user mode. "
        case POWERPC_EXCP_DSI: /* Data storage exception */
            EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
            /* XXX: check this. Seems bugged */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_signo = TARGET_SIGILL;
                info.si_code = TARGET_ILL_ILLADR;
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_ISI: /* Instruction storage exception */
            EXCP_DUMP(env, "Invalid instruction fetch: 0x" TARGET_FMT_lx
                      "\n", env->spr[SPR_SRR0]);
            /* XXX: check this */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_EXTERNAL: /* External input */
            cpu_abort(cs, "External interrupt while in user mode. "
        case POWERPC_EXCP_ALIGN: /* Alignment exception */
            EXCP_DUMP(env, "Unaligned memory access\n");
            /* XXX: check this */
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_PROGRAM: /* Program exception */
            /* XXX: check this */
            switch (env->error_code & ~0xF) {
            case POWERPC_EXCP_FP:
                EXCP_DUMP(env, "Floating point program exception\n");
                info.si_signo = TARGET_SIGFPE;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_FP_OX:
                    info.si_code = TARGET_FPE_FLTOVF;
                case POWERPC_EXCP_FP_UX:
                    info.si_code = TARGET_FPE_FLTUND;
                case POWERPC_EXCP_FP_ZX:
                case POWERPC_EXCP_FP_VXZDZ:
                    info.si_code = TARGET_FPE_FLTDIV;
                case POWERPC_EXCP_FP_XX:
                    info.si_code = TARGET_FPE_FLTRES;
                case POWERPC_EXCP_FP_VXSOFT:
                    info.si_code = TARGET_FPE_FLTINV;
                case POWERPC_EXCP_FP_VXSNAN:
                case POWERPC_EXCP_FP_VXISI:
                case POWERPC_EXCP_FP_VXIDI:
                case POWERPC_EXCP_FP_VXIMZ:
                case POWERPC_EXCP_FP_VXVC:
                case POWERPC_EXCP_FP_VXSQRT:
                case POWERPC_EXCP_FP_VXCVI:
                    info.si_code = TARGET_FPE_FLTSUB;
                    EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
            case POWERPC_EXCP_INVAL:
                EXCP_DUMP(env, "Invalid instruction\n");
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_INVAL_INVAL:
                    info.si_code = TARGET_ILL_ILLOPC;
                case POWERPC_EXCP_INVAL_LSWX:
                    info.si_code = TARGET_ILL_ILLOPN;
                case POWERPC_EXCP_INVAL_SPR:
                    info.si_code = TARGET_ILL_PRVREG;
                case POWERPC_EXCP_INVAL_FP:
                    info.si_code = TARGET_ILL_COPROC;
                    EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_ILLADR;
            case POWERPC_EXCP_PRIV:
                EXCP_DUMP(env, "Privilege violation\n");
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_PRIV_OPC:
                    info.si_code = TARGET_ILL_PRVOPC;
                case POWERPC_EXCP_PRIV_REG:
                    info.si_code = TARGET_ILL_PRVREG;
                    EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_PRVOPC;
            case POWERPC_EXCP_TRAP:
                cpu_abort(cs, "Tried to call a TRAP\n");
                /* Should not happen ! */
                cpu_abort(cs, "Unknown program exception (%02x)\n",
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
            EXCP_DUMP(env, "No floating point allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_SYSCALL: /* System call exception */
            cpu_abort(cs, "Syscall exception while in user mode. "
        case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
            EXCP_DUMP(env, "No APU instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_DECR: /* Decrementer exception */
            cpu_abort(cs, "Decrementer interrupt while in user mode. "
        case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
            cpu_abort(cs, "Fixed-interval timer interrupt while in user mode. "
        case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
            cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
        case POWERPC_EXCP_DTLB: /* Data TLB error */
            cpu_abort(cs, "Data TLB exception while in user mode. "
        case POWERPC_EXCP_ITLB: /* Instruction TLB error */
            cpu_abort(cs, "Instruction TLB exception while in user mode. "
        case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
            EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
            cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
        case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
            cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
        case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
            cpu_abort(cs, "Doorbell interrupt while in user mode. "
        case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
            cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
        case POWERPC_EXCP_RESET: /* System reset exception */
            cpu_abort(cs, "Reset interrupt while in user mode. "
        case POWERPC_EXCP_DSEG: /* Data segment exception */
            cpu_abort(cs, "Data segment exception while in user mode. "
        case POWERPC_EXCP_ISEG: /* Instruction segment exception */
            cpu_abort(cs, "Instruction segment exception "
                      "while in user mode. Aborting\n");
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
            cpu_abort(cs, "Hypervisor decrementer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_TRACE: /* Trace exception */
             * we use this exception to emulate step-by-step execution mode.
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
            cpu_abort(cs, "Hypervisor data storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
            cpu_abort(cs, "Hypervisor instruction storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
            cpu_abort(cs, "Hypervisor data segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
            cpu_abort(cs, "Hypervisor instruction segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_VPU: /* Vector unavailable exception */
            EXCP_DUMP(env, "No Altivec instructions allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
            cpu_abort(cs, "Programmable interval timer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_IO: /* IO error exception */
            cpu_abort(cs, "IO error exception while in user mode. "
        case POWERPC_EXCP_RUNM: /* Run mode exception */
            cpu_abort(cs, "Run mode exception while in user mode. "
        case POWERPC_EXCP_EMUL: /* Emulation trap exception */
            cpu_abort(cs, "Emulation trap exception not handled\n");
        case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
            cpu_abort(cs, "Instruction fetch TLB exception "
                      "while in user-mode. Aborting\n");
        case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
            cpu_abort(cs, "Data load TLB exception while in user-mode. "
        case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
            cpu_abort(cs, "Data store TLB exception while in user-mode. "
        case POWERPC_EXCP_FPA: /* Floating-point assist exception */
            cpu_abort(cs, "Floating-point assist exception not handled\n");
        case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
            cpu_abort(cs, "Instruction address breakpoint exception "
        case POWERPC_EXCP_SMI: /* System management interrupt */
            cpu_abort(cs, "System management interrupt while in user mode. "
        case POWERPC_EXCP_THERM: /* Thermal interrupt */
            cpu_abort(cs, "Thermal interrupt while in user mode. "
        case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_VPUA: /* Vector assist exception */
            cpu_abort(cs, "Vector assist exception not handled\n");
        case POWERPC_EXCP_SOFTP: /* Soft patch exception */
            cpu_abort(cs, "Soft patch exception not handled\n");
        case POWERPC_EXCP_MAINT: /* Maintenance exception */
            cpu_abort(cs, "Maintenance exception while in user mode. "
        case POWERPC_EXCP_STOP: /* stop translation */
            /* We did invalidate the instruction cache. Go on */
        case POWERPC_EXCP_BRANCH: /* branch instruction: */
            /* We just stopped because of a branch. Go on */
        case POWERPC_EXCP_SYSCALL_USER:
            /* system call in user-mode emulation */
             * PPC ABI uses overflow flag in cr0 to signal an error
            env->crf[0] &= ~0x1;
            ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                             env->gpr[5], env->gpr[6], env->gpr[7],
            if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
                /* Returning from a successful sigreturn syscall.
                   Avoid corrupting register state. */
            if (ret > (target_ulong)(-515)) {
        case POWERPC_EXCP_STCX:
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->nip;
                queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);
        process_pending_signals(env);
# ifdef TARGET_ABI_MIPSO32
# define MIPS_SYS(name, args) args,
static const uint8_t mips_syscall_args[] = {
    MIPS_SYS(sys_syscall , 8) /* 4000 */
    MIPS_SYS(sys_exit , 1)
    MIPS_SYS(sys_fork , 0)
    MIPS_SYS(sys_read , 3)
    MIPS_SYS(sys_write , 3)
    MIPS_SYS(sys_open , 3) /* 4005 */
    MIPS_SYS(sys_close , 1)
    MIPS_SYS(sys_waitpid , 3)
    MIPS_SYS(sys_creat , 2)
    MIPS_SYS(sys_link , 2)
    MIPS_SYS(sys_unlink , 1) /* 4010 */
    MIPS_SYS(sys_execve , 0)
    MIPS_SYS(sys_chdir , 1)
    MIPS_SYS(sys_time , 1)
    MIPS_SYS(sys_mknod , 3)
    MIPS_SYS(sys_chmod , 2) /* 4015 */
    MIPS_SYS(sys_lchown , 3)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_ni_syscall , 0) /* was sys_stat */
    MIPS_SYS(sys_lseek , 3)
    MIPS_SYS(sys_getpid , 0) /* 4020 */
    MIPS_SYS(sys_mount , 5)
    MIPS_SYS(sys_umount , 1)
    MIPS_SYS(sys_setuid , 1)
    MIPS_SYS(sys_getuid , 0)
    MIPS_SYS(sys_stime , 1) /* 4025 */
    MIPS_SYS(sys_ptrace , 4)
    MIPS_SYS(sys_alarm , 1)
    MIPS_SYS(sys_ni_syscall , 0) /* was sys_fstat */
    MIPS_SYS(sys_pause , 0)
    MIPS_SYS(sys_utime , 2) /* 4030 */
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_access , 2)
    MIPS_SYS(sys_nice , 1)
    MIPS_SYS(sys_ni_syscall , 0) /* 4035 */
    MIPS_SYS(sys_sync , 0)
    MIPS_SYS(sys_kill , 2)
    MIPS_SYS(sys_rename , 2)
    MIPS_SYS(sys_mkdir , 2)
    MIPS_SYS(sys_rmdir , 1) /* 4040 */
    MIPS_SYS(sys_dup , 1)
    MIPS_SYS(sys_pipe , 0)
    MIPS_SYS(sys_times , 1)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_brk , 1) /* 4045 */
    MIPS_SYS(sys_setgid , 1)
    MIPS_SYS(sys_getgid , 0)
    MIPS_SYS(sys_ni_syscall , 0) /* was signal(2) */
    MIPS_SYS(sys_geteuid , 0)
    MIPS_SYS(sys_getegid , 0) /* 4050 */
    MIPS_SYS(sys_acct , 0)
    MIPS_SYS(sys_umount2 , 2)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_ioctl , 3)
    MIPS_SYS(sys_fcntl , 3) /* 4055 */
    MIPS_SYS(sys_ni_syscall , 2)
    MIPS_SYS(sys_setpgid , 2)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_olduname , 1)
    MIPS_SYS(sys_umask , 1) /* 4060 */
    MIPS_SYS(sys_chroot , 1)
    MIPS_SYS(sys_ustat , 2)
    MIPS_SYS(sys_dup2 , 2)
    MIPS_SYS(sys_getppid , 0)
    MIPS_SYS(sys_getpgrp , 0) /* 4065 */
    MIPS_SYS(sys_setsid , 0)
    MIPS_SYS(sys_sigaction , 3)
    MIPS_SYS(sys_sgetmask , 0)
    MIPS_SYS(sys_ssetmask , 1)
    MIPS_SYS(sys_setreuid , 2) /* 4070 */
    MIPS_SYS(sys_setregid , 2)
    MIPS_SYS(sys_sigsuspend , 0)
    MIPS_SYS(sys_sigpending , 1)
    MIPS_SYS(sys_sethostname , 2)
    MIPS_SYS(sys_setrlimit , 2) /* 4075 */
    MIPS_SYS(sys_getrlimit , 2)
    MIPS_SYS(sys_getrusage , 2)
    MIPS_SYS(sys_gettimeofday, 2)
    MIPS_SYS(sys_settimeofday, 2)
    MIPS_SYS(sys_getgroups , 2) /* 4080 */
    MIPS_SYS(sys_setgroups , 2)
    MIPS_SYS(sys_ni_syscall , 0) /* old_select */
    MIPS_SYS(sys_symlink , 2)
    MIPS_SYS(sys_ni_syscall , 0) /* was sys_lstat */
    MIPS_SYS(sys_readlink , 3) /* 4085 */
    MIPS_SYS(sys_uselib , 1)
    MIPS_SYS(sys_swapon , 2)
    MIPS_SYS(sys_reboot , 3)
    MIPS_SYS(old_readdir , 3)
    MIPS_SYS(old_mmap , 6) /* 4090 */
    MIPS_SYS(sys_munmap , 2)
    MIPS_SYS(sys_truncate , 2)
    MIPS_SYS(sys_ftruncate , 2)
    MIPS_SYS(sys_fchmod , 2)
    MIPS_SYS(sys_fchown , 3) /* 4095 */
    MIPS_SYS(sys_getpriority , 2)
    MIPS_SYS(sys_setpriority , 3)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_statfs , 2)
    MIPS_SYS(sys_fstatfs , 2) /* 4100 */
    MIPS_SYS(sys_ni_syscall , 0) /* was ioperm(2) */
    MIPS_SYS(sys_socketcall , 2)
    MIPS_SYS(sys_syslog , 3)
    MIPS_SYS(sys_setitimer , 3)
    MIPS_SYS(sys_getitimer , 2) /* 4105 */
    MIPS_SYS(sys_newstat , 2)
    MIPS_SYS(sys_newlstat , 2)
    MIPS_SYS(sys_newfstat , 2)
    MIPS_SYS(sys_uname , 1)
    MIPS_SYS(sys_ni_syscall , 0) /* 4110 was iopl(2) */
    MIPS_SYS(sys_vhangup , 0)
    MIPS_SYS(sys_ni_syscall , 0) /* was sys_idle() */
    MIPS_SYS(sys_ni_syscall , 0) /* was sys_vm86 */
    MIPS_SYS(sys_wait4 , 4)
    MIPS_SYS(sys_swapoff , 1) /* 4115 */
    MIPS_SYS(sys_sysinfo , 1)
    MIPS_SYS(sys_ipc , 6)
    MIPS_SYS(sys_fsync , 1)
    MIPS_SYS(sys_sigreturn , 0)
    MIPS_SYS(sys_clone , 6) /* 4120 */
    MIPS_SYS(sys_setdomainname, 2)
    MIPS_SYS(sys_newuname , 1)
    MIPS_SYS(sys_ni_syscall , 0) /* sys_modify_ldt */
    MIPS_SYS(sys_adjtimex , 1)
    MIPS_SYS(sys_mprotect , 3) /* 4125 */
    MIPS_SYS(sys_sigprocmask , 3)
    MIPS_SYS(sys_ni_syscall , 0) /* was create_module */
    MIPS_SYS(sys_init_module , 5)
    MIPS_SYS(sys_delete_module, 1)
    MIPS_SYS(sys_ni_syscall , 0) /* 4130 was get_kernel_syms */
    MIPS_SYS(sys_quotactl , 0)
    MIPS_SYS(sys_getpgid , 1)
    MIPS_SYS(sys_fchdir , 1)
    MIPS_SYS(sys_bdflush , 2)
    MIPS_SYS(sys_sysfs , 3) /* 4135 */
    MIPS_SYS(sys_personality , 1)
    MIPS_SYS(sys_ni_syscall , 0) /* for afs_syscall */
    MIPS_SYS(sys_setfsuid , 1)
    MIPS_SYS(sys_setfsgid , 1)
    MIPS_SYS(sys_llseek , 5) /* 4140 */
    MIPS_SYS(sys_getdents , 3)
    MIPS_SYS(sys_select , 5)
    MIPS_SYS(sys_flock , 2)
    MIPS_SYS(sys_msync , 3)
    MIPS_SYS(sys_readv , 3) /* 4145 */
    MIPS_SYS(sys_writev , 3)
    MIPS_SYS(sys_cacheflush , 3)
    MIPS_SYS(sys_cachectl , 3)
    MIPS_SYS(sys_sysmips , 4)
    MIPS_SYS(sys_ni_syscall , 0) /* 4150 */
    MIPS_SYS(sys_getsid , 1)
    MIPS_SYS(sys_fdatasync , 0)
    MIPS_SYS(sys_sysctl , 1)
    MIPS_SYS(sys_mlock , 2)
    MIPS_SYS(sys_munlock , 2) /* 4155 */
    MIPS_SYS(sys_mlockall , 1)
    MIPS_SYS(sys_munlockall , 0)
    MIPS_SYS(sys_sched_setparam, 2)
    MIPS_SYS(sys_sched_getparam, 2)
    MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
    MIPS_SYS(sys_sched_getscheduler, 1)
    MIPS_SYS(sys_sched_yield , 0)
    MIPS_SYS(sys_sched_get_priority_max, 1)
    MIPS_SYS(sys_sched_get_priority_min, 1)
    MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */
    MIPS_SYS(sys_nanosleep, 2)
    MIPS_SYS(sys_mremap , 5)
    MIPS_SYS(sys_accept , 3)
    MIPS_SYS(sys_bind , 3)
    MIPS_SYS(sys_connect , 3) /* 4170 */
    MIPS_SYS(sys_getpeername , 3)
    MIPS_SYS(sys_getsockname , 3)
    MIPS_SYS(sys_getsockopt , 5)
    MIPS_SYS(sys_listen , 2)
    MIPS_SYS(sys_recv , 4) /* 4175 */
    MIPS_SYS(sys_recvfrom , 6)
    MIPS_SYS(sys_recvmsg , 3)
    MIPS_SYS(sys_send , 4)
    MIPS_SYS(sys_sendmsg , 3)
    MIPS_SYS(sys_sendto , 6) /* 4180 */
    MIPS_SYS(sys_setsockopt , 5)
    MIPS_SYS(sys_shutdown , 2)
    MIPS_SYS(sys_socket , 3)
    MIPS_SYS(sys_socketpair , 4)
    MIPS_SYS(sys_setresuid , 3) /* 4185 */
    MIPS_SYS(sys_getresuid , 3)
    MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */
    MIPS_SYS(sys_poll , 3)
    MIPS_SYS(sys_nfsservctl , 3)
    MIPS_SYS(sys_setresgid , 3) /* 4190 */
    MIPS_SYS(sys_getresgid , 3)
    MIPS_SYS(sys_prctl , 5)
    MIPS_SYS(sys_rt_sigreturn, 0)
    MIPS_SYS(sys_rt_sigaction, 4)
    MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
    MIPS_SYS(sys_rt_sigpending, 2)
    MIPS_SYS(sys_rt_sigtimedwait, 4)
    MIPS_SYS(sys_rt_sigqueueinfo, 3)
    MIPS_SYS(sys_rt_sigsuspend, 0)
    MIPS_SYS(sys_pread64 , 6) /* 4200 */
    MIPS_SYS(sys_pwrite64 , 6)
    MIPS_SYS(sys_chown , 3)
    MIPS_SYS(sys_getcwd , 2)
    MIPS_SYS(sys_capget , 2)
    MIPS_SYS(sys_capset , 2) /* 4205 */
    MIPS_SYS(sys_sigaltstack , 2)
    MIPS_SYS(sys_sendfile , 4)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_mmap2 , 6) /* 4210 */
    MIPS_SYS(sys_truncate64 , 4)
    MIPS_SYS(sys_ftruncate64 , 4)
    MIPS_SYS(sys_stat64 , 2)
    MIPS_SYS(sys_lstat64 , 2)
    MIPS_SYS(sys_fstat64 , 2) /* 4215 */
    MIPS_SYS(sys_pivot_root , 2)
    MIPS_SYS(sys_mincore , 3)
    MIPS_SYS(sys_madvise , 3)
    MIPS_SYS(sys_getdents64 , 3)
    MIPS_SYS(sys_fcntl64 , 3) /* 4220 */
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_gettid , 0)
    MIPS_SYS(sys_readahead , 5)
    MIPS_SYS(sys_setxattr , 5)
    MIPS_SYS(sys_lsetxattr , 5) /* 4225 */
    MIPS_SYS(sys_fsetxattr , 5)
    MIPS_SYS(sys_getxattr , 4)
    MIPS_SYS(sys_lgetxattr , 4)
    MIPS_SYS(sys_fgetxattr , 4)
    MIPS_SYS(sys_listxattr , 3) /* 4230 */
    MIPS_SYS(sys_llistxattr , 3)
    MIPS_SYS(sys_flistxattr , 3)
    MIPS_SYS(sys_removexattr , 2)
    MIPS_SYS(sys_lremovexattr, 2)
    MIPS_SYS(sys_fremovexattr, 2) /* 4235 */
    MIPS_SYS(sys_tkill , 2)
    MIPS_SYS(sys_sendfile64 , 5)
    MIPS_SYS(sys_futex , 6)
    MIPS_SYS(sys_sched_setaffinity, 3)
    MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */
    MIPS_SYS(sys_io_setup , 2)
    MIPS_SYS(sys_io_destroy , 1)
    MIPS_SYS(sys_io_getevents, 5)
    MIPS_SYS(sys_io_submit , 3)
    MIPS_SYS(sys_io_cancel , 3) /* 4245 */
    MIPS_SYS(sys_exit_group , 1)
    MIPS_SYS(sys_lookup_dcookie, 3)
    MIPS_SYS(sys_epoll_create, 1)
    MIPS_SYS(sys_epoll_ctl , 4)
    MIPS_SYS(sys_epoll_wait , 3) /* 4250 */
    MIPS_SYS(sys_remap_file_pages, 5)
    MIPS_SYS(sys_set_tid_address, 1)
    MIPS_SYS(sys_restart_syscall, 0)
    MIPS_SYS(sys_fadvise64_64, 7)
    MIPS_SYS(sys_statfs64 , 3) /* 4255 */
    MIPS_SYS(sys_fstatfs64 , 2)
    MIPS_SYS(sys_timer_create, 3)
    MIPS_SYS(sys_timer_settime, 4)
    MIPS_SYS(sys_timer_gettime, 2)
    MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */
    MIPS_SYS(sys_timer_delete, 1)
    MIPS_SYS(sys_clock_settime, 2)
    MIPS_SYS(sys_clock_gettime, 2)
    MIPS_SYS(sys_clock_getres, 2)
    MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */
    MIPS_SYS(sys_tgkill , 3)
    MIPS_SYS(sys_utimes , 2)
    MIPS_SYS(sys_mbind , 4)
    MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */
    MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */
    MIPS_SYS(sys_mq_open , 4)
    MIPS_SYS(sys_mq_unlink , 1)
    MIPS_SYS(sys_mq_timedsend, 5)
    MIPS_SYS(sys_mq_timedreceive, 5)
    MIPS_SYS(sys_mq_notify , 2) /* 4275 */
    MIPS_SYS(sys_mq_getsetattr, 3)
    MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */
    MIPS_SYS(sys_waitid , 4)
    MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */
    MIPS_SYS(sys_add_key , 5)
    MIPS_SYS(sys_request_key, 4)
    MIPS_SYS(sys_keyctl , 5)
    MIPS_SYS(sys_set_thread_area, 1)
    MIPS_SYS(sys_inotify_init, 0)
    MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */
    MIPS_SYS(sys_inotify_rm_watch, 2)
    MIPS_SYS(sys_migrate_pages, 4)
    MIPS_SYS(sys_openat, 4)
    MIPS_SYS(sys_mkdirat, 3)
    MIPS_SYS(sys_mknodat, 4) /* 4290 */
    MIPS_SYS(sys_fchownat, 5)
    MIPS_SYS(sys_futimesat, 3)
    MIPS_SYS(sys_fstatat64, 4)
    MIPS_SYS(sys_unlinkat, 3)
    MIPS_SYS(sys_renameat, 4) /* 4295 */
    MIPS_SYS(sys_linkat, 5)
    MIPS_SYS(sys_symlinkat, 3)
    MIPS_SYS(sys_readlinkat, 4)
    MIPS_SYS(sys_fchmodat, 3)
    MIPS_SYS(sys_faccessat, 3) /* 4300 */
    MIPS_SYS(sys_pselect6, 6)
    MIPS_SYS(sys_ppoll, 5)
    MIPS_SYS(sys_unshare, 1)
    MIPS_SYS(sys_splice, 6)
    MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
    MIPS_SYS(sys_tee, 4)
    MIPS_SYS(sys_vmsplice, 4)
    MIPS_SYS(sys_move_pages, 6)
    MIPS_SYS(sys_set_robust_list, 2)
    MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
    MIPS_SYS(sys_kexec_load, 4)
    MIPS_SYS(sys_getcpu, 3)
    MIPS_SYS(sys_epoll_pwait, 6)
    MIPS_SYS(sys_ioprio_set, 3)
    MIPS_SYS(sys_ioprio_get, 2)
    MIPS_SYS(sys_utimensat, 4)
    MIPS_SYS(sys_signalfd, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
    MIPS_SYS(sys_eventfd, 1)
    MIPS_SYS(sys_fallocate, 6) /* 4320 */
    MIPS_SYS(sys_timerfd_create, 2)
    MIPS_SYS(sys_timerfd_gettime, 2)
    MIPS_SYS(sys_timerfd_settime, 4)
    MIPS_SYS(sys_signalfd4, 4)
    MIPS_SYS(sys_eventfd2, 2) /* 4325 */
    MIPS_SYS(sys_epoll_create1, 1)
    MIPS_SYS(sys_dup3, 3)
    MIPS_SYS(sys_pipe2, 2)
    MIPS_SYS(sys_inotify_init1, 1)
    MIPS_SYS(sys_preadv, 6) /* 4330 */
    MIPS_SYS(sys_pwritev, 6)
    MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
    MIPS_SYS(sys_perf_event_open, 5)
    MIPS_SYS(sys_accept4, 4)
    MIPS_SYS(sys_recvmmsg, 5) /* 4335 */
    MIPS_SYS(sys_fanotify_init, 2)
    MIPS_SYS(sys_fanotify_mark, 6)
    MIPS_SYS(sys_prlimit64, 4)
    MIPS_SYS(sys_name_to_handle_at, 5)
    MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
    MIPS_SYS(sys_clock_adjtime, 2)
    MIPS_SYS(sys_syncfs, 1)
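    /*
     * Note (see the O32 path in cpu_loop() below): this table is indexed by
     * (syscall number - 4000); each entry is the argument count, and any
     * arguments beyond the first four registers are fetched from the guest
     * stack.
     */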
static int do_store_exclusive(CPUMIPSState *env)
    target_ulong page_addr;

    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
            segv = get_user_s64(val, addr);
            segv = get_user_s32(val, addr);
        if (val != env->llval) {
            env->active_tc.gpr[reg] = 0;
                segv = put_user_u64(env->llnewval, addr);
                segv = put_user_u32(env->llnewval, addr);
                env->active_tc.gpr[reg] = 1;
    env->active_tc.PC += 4;
static int do_break(CPUMIPSState *env, target_siginfo_t *info,
        info->si_signo = TARGET_SIGFPE;
        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
        queue_signal(env, info->si_signo, info);
        info->si_signo = TARGET_SIGTRAP;
        queue_signal(env, info->si_signo, info);
2443 void cpu_loop(CPUMIPSState *env)
2445 CPUState *cs = CPU(mips_env_get_cpu(env));
2446 target_siginfo_t info;
2449 # ifdef TARGET_ABI_MIPSO32
2450 unsigned int syscall_num;
2455 trapnr = cpu_mips_exec(cs);
2459 env->active_tc.PC += 4;
2460 # ifdef TARGET_ABI_MIPSO32
2461 syscall_num = env->active_tc.gpr[2] - 4000;
2462 if (syscall_num >= sizeof(mips_syscall_args)) {
2463 ret = -TARGET_ENOSYS;
2467 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
2469 nb_args = mips_syscall_args[syscall_num];
2470 sp_reg = env->active_tc.gpr[29];
2472 /* these arguments are taken from the stack */
2474 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
2478 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
2482 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
2486 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
2492 ret = do_syscall(env, env->active_tc.gpr[2],
2493 env->active_tc.gpr[4],
2494 env->active_tc.gpr[5],
2495 env->active_tc.gpr[6],
2496 env->active_tc.gpr[7],
2497 arg5, arg6, arg7, arg8);
2501 ret = do_syscall(env, env->active_tc.gpr[2],
2502 env->active_tc.gpr[4], env->active_tc.gpr[5],
2503 env->active_tc.gpr[6], env->active_tc.gpr[7],
2504 env->active_tc.gpr[8], env->active_tc.gpr[9],
2505 env->active_tc.gpr[10], env->active_tc.gpr[11]);
2507 if (ret == -TARGET_QEMU_ESIGRETURN) {
2508 /* Returning from a successful sigreturn syscall.
2509 Avoid clobbering register state. */
2512 if ((abi_ulong)ret >= (abi_ulong)-1133) {
2513 env->active_tc.gpr[7] = 1; /* error flag */
2516 env->active_tc.gpr[7] = 0; /* error flag */
2518 env->active_tc.gpr[2] = ret;
2524 info.si_signo = TARGET_SIGSEGV;
2526 /* XXX: check env->error_code */
2527 info.si_code = TARGET_SEGV_MAPERR;
2528 info._sifields._sigfault._addr = env->CP0_BadVAddr;
2529 queue_signal(env, info.si_signo, &info);
2533 info.si_signo = TARGET_SIGILL;
2536 queue_signal(env, info.si_signo, &info);
2538 case EXCP_INTERRUPT:
2539 /* just indicate that signals should be handled asap */
2545 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2548 info.si_signo = sig;
2550 info.si_code = TARGET_TRAP_BRKPT;
2551 queue_signal(env, info.si_signo, &info);
2556 if (do_store_exclusive(env)) {
2557 info.si_signo = TARGET_SIGSEGV;
2559 info.si_code = TARGET_SEGV_MAPERR;
2560 info._sifields._sigfault._addr = env->active_tc.PC;
2561 queue_signal(env, info.si_signo, &info);
2565 info.si_signo = TARGET_SIGILL;
2567 info.si_code = TARGET_ILL_ILLOPC;
2568 queue_signal(env, info.si_signo, &info);
2570 /* The code below was inspired by the MIPS Linux kernel trap
2571 * handling code in arch/mips/kernel/traps.c.
2575 abi_ulong trap_instr;
2578 if (env->hflags & MIPS_HFLAG_M16) {
2579 if (env->insn_flags & ASE_MICROMIPS) {
2580 /* microMIPS mode */
2581 ret = get_user_u16(trap_instr, env->active_tc.PC);
2586 if ((trap_instr >> 10) == 0x11) {
2587 /* 16-bit instruction */
2588 code = trap_instr & 0xf;
2590 /* 32-bit instruction */
2593 ret = get_user_u16(instr_lo,
2594 env->active_tc.PC + 2);
2598 trap_instr = (trap_instr << 16) | instr_lo;
2599 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2600 /* Unfortunately, microMIPS also suffers from
2601 the old assembler bug... */
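/* (Affected assemblers placed the break code in the upper half of
 * the 20-bit field, so overly large values are shifted back down.) */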
2602 if (code >= (1 << 10)) {
2608 ret = get_user_u16(trap_instr, env->active_tc.PC);
2612 code = (trap_instr >> 6) & 0x3f;
2615 ret = get_user_u32(trap_instr, env->active_tc.PC);
2620 /* As described in the original Linux kernel code, the
2621 * below checks on 'code' are to work around an old
* assembler bug. */
2624 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2625 if (code >= (1 << 10)) {
2630 if (do_break(env, &info, code) != 0) {
2637 abi_ulong trap_instr;
2638 unsigned int code = 0;
2640 if (env->hflags & MIPS_HFLAG_M16) {
2641 /* microMIPS mode */
2644 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2645 get_user_u16(instr[1], env->active_tc.PC + 2);
2647 trap_instr = (instr[0] << 16) | instr[1];
2649 ret = get_user_u32(trap_instr, env->active_tc.PC);
2656 /* The immediate versions don't provide a code. */
2657 if (!(trap_instr & 0xFC000000)) {
2658 if (env->hflags & MIPS_HFLAG_M16) {
2659 /* microMIPS mode */
2660 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2662 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2666 if (do_break(env, &info, code) != 0) {
2673 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
2676 process_pending_signals(env);
2681 #ifdef TARGET_OPENRISC
2683 void cpu_loop(CPUOpenRISCState *env)
2685 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2690 trapnr = cpu_openrisc_exec(cs);
2696 qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
2700 qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
2701 gdbsig = TARGET_SIGBUS;
2705 cpu_dump_state(cs, stderr, fprintf, 0);
2706 gdbsig = TARGET_SIGSEGV;
2709 qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
2712 qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
2713 gdbsig = TARGET_SIGBUS;
2716 qemu_log_mask(CPU_LOG_INT, "\nIllegal instructionpc is %#x\n", env->pc);
2717 gdbsig = TARGET_SIGILL;
2720 qemu_log_mask(CPU_LOG_INT, "\nExternal interruptpc is %#x\n", env->pc);
2724 qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
2727 qemu_log_mask(CPU_LOG_INT, "\nRange\n");
2728 gdbsig = TARGET_SIGSEGV;
2731 env->pc += 4; /* 0xc00; */
2732 env->gpr[11] = do_syscall(env,
2733 env->gpr[11], /* syscall number; r11 also receives the return value */
2734 env->gpr[3], /* r3 - r7 are params */
2742 qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
2745 qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
2746 gdbsig = TARGET_SIGTRAP;
2749 qemu_log_mask(CPU_LOG_INT, "\nNR\n");
2752 EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
2754 gdbsig = TARGET_SIGILL;
2758 gdb_handlesig(cs, gdbsig);
2759 if (gdbsig != TARGET_SIGTRAP) {
2764 process_pending_signals(env);
2768 #endif /* TARGET_OPENRISC */
2771 void cpu_loop(CPUSH4State *env)
2773 CPUState *cs = CPU(sh_env_get_cpu(env));
2775 target_siginfo_t info;
2779 trapnr = cpu_sh4_exec(cs);
2785 ret = do_syscall(env,
2794 env->gregs[0] = ret;
2796 case EXCP_INTERRUPT:
2797 /* just indicate that signals should be handled asap */
2803 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2806 info.si_signo = sig;
2808 info.si_code = TARGET_TRAP_BRKPT;
2809 queue_signal(env, info.si_signo, &info);
2815 info.si_signo = TARGET_SIGSEGV;
2817 info.si_code = TARGET_SEGV_MAPERR;
2818 info._sifields._sigfault._addr = env->tea;
2819 queue_signal(env, info.si_signo, &info);
2823 printf ("Unhandled trap: 0x%x\n", trapnr);
2824 cpu_dump_state(cs, stderr, fprintf, 0);
2827 process_pending_signals (env);
2833 void cpu_loop(CPUCRISState *env)
2835 CPUState *cs = CPU(cris_env_get_cpu(env));
2837 target_siginfo_t info;
2841 trapnr = cpu_cris_exec(cs);
2846 info.si_signo = TARGET_SIGSEGV;
2848 /* XXX: check env->error_code */
2849 info.si_code = TARGET_SEGV_MAPERR;
2850 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2851 queue_signal(env, info.si_signo, &info);
2854 case EXCP_INTERRUPT:
2855 /* just indicate that signals should be handled asap */
2858 ret = do_syscall(env,
2867 env->regs[10] = ret;
2873 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2876 info.si_signo = sig;
2878 info.si_code = TARGET_TRAP_BRKPT;
2879 queue_signal(env, info.si_signo, &info);
2884 printf ("Unhandled trap: 0x%x\n", trapnr);
2885 cpu_dump_state(cs, stderr, fprintf, 0);
2888 process_pending_signals (env);
2893 #ifdef TARGET_MICROBLAZE
2894 void cpu_loop(CPUMBState *env)
2896 CPUState *cs = CPU(mb_env_get_cpu(env));
2898 target_siginfo_t info;
2902 trapnr = cpu_mb_exec(cs);
2907 info.si_signo = TARGET_SIGSEGV;
2909 /* XXX: check env->error_code */
2910 info.si_code = TARGET_SEGV_MAPERR;
2911 info._sifields._sigfault._addr = 0;
2912 queue_signal(env, info.si_signo, &info);
2915 case EXCP_INTERRUPT:
2916 /* just indicate that signals should be handled asap */
2919 /* Return address is 4 bytes after the call. */
2921 env->sregs[SR_PC] = env->regs[14];
2922 ret = do_syscall(env,
2934 env->regs[17] = env->sregs[SR_PC] + 4;
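/* If the exception was raised in a branch delay slot, note that in
 * ESR and rewind the PC to the branch so it is re-executed after the
 * signal has been delivered. */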
2935 if (env->iflags & D_FLAG) {
2936 env->sregs[SR_ESR] |= 1 << 12;
2937 env->sregs[SR_PC] -= 4;
2938 /* FIXME: if branch was immed, replay the imm as well. */
2941 env->iflags &= ~(IMM_FLAG | D_FLAG);
2943 switch (env->sregs[SR_ESR] & 31) {
2944 case ESR_EC_DIVZERO:
2945 info.si_signo = TARGET_SIGFPE;
2947 info.si_code = TARGET_FPE_FLTDIV;
2948 info._sifields._sigfault._addr = 0;
2949 queue_signal(env, info.si_signo, &info);
2952 info.si_signo = TARGET_SIGFPE;
2954 if (env->sregs[SR_FSR] & FSR_IO) {
2955 info.si_code = TARGET_FPE_FLTINV;
2957 if (env->sregs[SR_FSR] & FSR_DZ) {
2958 info.si_code = TARGET_FPE_FLTDIV;
2960 info._sifields._sigfault._addr = 0;
2961 queue_signal(env, info.si_signo, &info);
2964 printf ("Unhandled hw-exception: 0x%x\n",
2965 env->sregs[SR_ESR] & ESR_EC_MASK);
2966 cpu_dump_state(cs, stderr, fprintf, 0);
2975 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2978 info.si_signo = sig;
2980 info.si_code = TARGET_TRAP_BRKPT;
2981 queue_signal(env, info.si_signo, &info);
2986 printf ("Unhandled trap: 0x%x\n", trapnr);
2987 cpu_dump_state(cs, stderr, fprintf, 0);
2990 process_pending_signals (env);
2997 void cpu_loop(CPUM68KState *env)
2999 CPUState *cs = CPU(m68k_env_get_cpu(env));
3002 target_siginfo_t info;
3003 TaskState *ts = cs->opaque;
3007 trapnr = cpu_m68k_exec(cs);
3012 if (ts->sim_syscalls) {
3014 get_user_u16(nr, env->pc + 2);
3016 do_m68k_simcall(env, nr);
3022 case EXCP_HALT_INSN:
3023 /* Semihosting syscall. */
3025 do_m68k_semihosting(env, env->dregs[0]);
3029 case EXCP_UNSUPPORTED:
3031 info.si_signo = TARGET_SIGILL;
3033 info.si_code = TARGET_ILL_ILLOPN;
3034 info._sifields._sigfault._addr = env->pc;
3035 queue_signal(env, info.si_signo, &info);
3039 ts->sim_syscalls = 0;
3042 env->dregs[0] = do_syscall(env,
3053 case EXCP_INTERRUPT:
3054 /* just indicate that signals should be handled asap */
3058 info.si_signo = TARGET_SIGSEGV;
3060 /* XXX: check env->error_code */
3061 info.si_code = TARGET_SEGV_MAPERR;
3062 info._sifields._sigfault._addr = env->mmu.ar;
3063 queue_signal(env, info.si_signo, &info);
3070 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3073 info.si_signo = sig;
3075 info.si_code = TARGET_TRAP_BRKPT;
3076 queue_signal(env, info.si_signo, &info);
3081 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
3084 process_pending_signals(env);
3087 #endif /* TARGET_M68K */
3090 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
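/* Emulate the Alpha store-conditional (STL_C/STQ_C): the store is
 * performed only if the locked location still holds lock_value, and
 * the destination register reports success (1) or failure (0). */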
3092 target_ulong addr, val, tmp;
3093 target_siginfo_t info;
3096 addr = env->lock_addr;
3097 tmp = env->lock_st_addr;
3098 env->lock_addr = -1;
3099 env->lock_st_addr = 0;
3105 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3109 if (val == env->lock_value) {
3111 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
3128 info.si_signo = TARGET_SIGSEGV;
3130 info.si_code = TARGET_SEGV_MAPERR;
3131 info._sifields._sigfault._addr = addr;
3132 queue_signal(env, TARGET_SIGSEGV, &info);
3135 void cpu_loop(CPUAlphaState *env)
3137 CPUState *cs = CPU(alpha_env_get_cpu(env));
3139 target_siginfo_t info;
3144 trapnr = cpu_alpha_exec(cs);
3147 /* All of the traps imply a transition through PALcode, which
3148 in turn implies that an REI instruction has been executed,
3149 so the intr_flag should be cleared. */
3154 fprintf(stderr, "Reset requested. Exit\n");
3158 fprintf(stderr, "Machine check exception. Exit\n");
3161 case EXCP_SMP_INTERRUPT:
3162 case EXCP_CLK_INTERRUPT:
3163 case EXCP_DEV_INTERRUPT:
3164 fprintf(stderr, "External interrupt. Exit\n");
3168 env->lock_addr = -1;
3169 info.si_signo = TARGET_SIGSEGV;
3171 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
3172 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
3173 info._sifields._sigfault._addr = env->trap_arg0;
3174 queue_signal(env, info.si_signo, &info);
3177 env->lock_addr = -1;
3178 info.si_signo = TARGET_SIGBUS;
3180 info.si_code = TARGET_BUS_ADRALN;
3181 info._sifields._sigfault._addr = env->trap_arg0;
3182 queue_signal(env, info.si_signo, &info);
3186 env->lock_addr = -1;
3187 info.si_signo = TARGET_SIGILL;
3189 info.si_code = TARGET_ILL_ILLOPC;
3190 info._sifields._sigfault._addr = env->pc;
3191 queue_signal(env, info.si_signo, &info);
3194 env->lock_addr = -1;
3195 info.si_signo = TARGET_SIGFPE;
3197 info.si_code = TARGET_FPE_FLTINV;
3198 info._sifields._sigfault._addr = env->pc;
3199 queue_signal(env, info.si_signo, &info);
3202 /* No-op. Linux simply re-enables the FPU. */
3205 env->lock_addr = -1;
3206 switch (env->error_code) {
3209 info.si_signo = TARGET_SIGTRAP;
3211 info.si_code = TARGET_TRAP_BRKPT;
3212 info._sifields._sigfault._addr = env->pc;
3213 queue_signal(env, info.si_signo, &info);
3217 info.si_signo = TARGET_SIGTRAP;
3220 info._sifields._sigfault._addr = env->pc;
3221 queue_signal(env, info.si_signo, &info);
3225 trapnr = env->ir[IR_V0];
3226 sysret = do_syscall(env, trapnr,
3227 env->ir[IR_A0], env->ir[IR_A1],
3228 env->ir[IR_A2], env->ir[IR_A3],
3229 env->ir[IR_A4], env->ir[IR_A5],
3231 if (trapnr == TARGET_NR_sigreturn
3232 || trapnr == TARGET_NR_rt_sigreturn) {
3235 /* A syscall may write 0 to V0 to bypass the error check, similar
3236 to how this is handled internally by the Linux kernel.
3237 (Ab)use trapnr temporarily as a boolean indicating an error. */
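/* Alpha/Linux convention: a3 is zero on success and non-zero on
 * error, in which case v0 holds the positive errno. */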
3238 trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
3239 env->ir[IR_V0] = (trapnr ? -sysret : sysret);
3240 env->ir[IR_A3] = trapnr;
3244 /* ??? We can probably elide the code using page_unprotect
3245 that is checking for self-modifying code. Instead we
3246 could simply call tb_flush here. Until we work out the
3247 changes required to turn off the extra write protection,
3248 this can be a no-op. */
3252 /* Handled in the translator for usermode. */
3256 /* Handled in the translator for usermode. */
3260 info.si_signo = TARGET_SIGFPE;
3261 switch (env->ir[IR_A0]) {
3262 case TARGET_GEN_INTOVF:
3263 info.si_code = TARGET_FPE_INTOVF;
3265 case TARGET_GEN_INTDIV:
3266 info.si_code = TARGET_FPE_INTDIV;
3268 case TARGET_GEN_FLTOVF:
3269 info.si_code = TARGET_FPE_FLTOVF;
3271 case TARGET_GEN_FLTUND:
3272 info.si_code = TARGET_FPE_FLTUND;
3274 case TARGET_GEN_FLTINV:
3275 info.si_code = TARGET_FPE_FLTINV;
3277 case TARGET_GEN_FLTINE:
3278 info.si_code = TARGET_FPE_FLTRES;
3280 case TARGET_GEN_ROPRAND:
3284 info.si_signo = TARGET_SIGTRAP;
3289 info._sifields._sigfault._addr = env->pc;
3290 queue_signal(env, info.si_signo, &info);
3297 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
3298 if (info.si_signo) {
3299 env->lock_addr = -1;
3301 info.si_code = TARGET_TRAP_BRKPT;
3302 queue_signal(env, info.si_signo, &info);
3307 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
3309 case EXCP_INTERRUPT:
3310 /* Just indicate that signals should be handled asap. */
3313 printf ("Unhandled trap: 0x%x\n", trapnr);
3314 cpu_dump_state(cs, stderr, fprintf, 0);
3317 process_pending_signals (env);
3320 #endif /* TARGET_ALPHA */
3323 void cpu_loop(CPUS390XState *env)
3325 CPUState *cs = CPU(s390_env_get_cpu(env));
3327 target_siginfo_t info;
3332 trapnr = cpu_s390x_exec(cs);
3335 case EXCP_INTERRUPT:
3336 /* Just indicate that signals should be handled asap. */
3340 n = env->int_svc_code;
3342 /* syscalls > 255 */
3345 env->psw.addr += env->int_svc_ilen;
3346 env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
3347 env->regs[4], env->regs[5],
3348 env->regs[6], env->regs[7], 0, 0);
3352 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3354 n = TARGET_TRAP_BRKPT;
3359 n = env->int_pgm_code;
3362 case PGM_PRIVILEGED:
3363 sig = TARGET_SIGILL;
3364 n = TARGET_ILL_ILLOPC;
3366 case PGM_PROTECTION:
3367 case PGM_ADDRESSING:
3368 sig = TARGET_SIGSEGV;
3369 /* XXX: check env->error_code */
3370 n = TARGET_SEGV_MAPERR;
3371 addr = env->__excp_addr;
3374 case PGM_SPECIFICATION:
3375 case PGM_SPECIAL_OP:
3378 sig = TARGET_SIGILL;
3379 n = TARGET_ILL_ILLOPN;
3382 case PGM_FIXPT_OVERFLOW:
3383 sig = TARGET_SIGFPE;
3384 n = TARGET_FPE_INTOVF;
3386 case PGM_FIXPT_DIVIDE:
3387 sig = TARGET_SIGFPE;
3388 n = TARGET_FPE_INTDIV;
3392 n = (env->fpc >> 8) & 0xff;
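/* Bits 8-15 of the FPC hold the data-exception code (DXC); its flag
 * bits are decoded below into the matching SIGFPE si_code. */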
3394 /* compare-and-trap */
3397 /* An IEEE exception, simulated or otherwise. */
3399 n = TARGET_FPE_FLTINV;
3400 } else if (n & 0x40) {
3401 n = TARGET_FPE_FLTDIV;
3402 } else if (n & 0x20) {
3403 n = TARGET_FPE_FLTOVF;
3404 } else if (n & 0x10) {
3405 n = TARGET_FPE_FLTUND;
3406 } else if (n & 0x08) {
3407 n = TARGET_FPE_FLTRES;
3409 /* ??? Quantum exception; BFP, DFP error. */
3412 sig = TARGET_SIGFPE;
3417 fprintf(stderr, "Unhandled program exception: %#x\n", n);
3418 cpu_dump_state(cs, stderr, fprintf, 0);
3424 addr = env->psw.addr;
3426 info.si_signo = sig;
3429 info._sifields._sigfault._addr = addr;
3430 queue_signal(env, info.si_signo, &info);
3434 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
3435 cpu_dump_state(cs, stderr, fprintf, 0);
3438 process_pending_signals(env);
3442 #endif /* TARGET_S390X */
3444 #ifdef TARGET_TILEGX
3446 static void gen_sigill_reg(CPUTLGState *env)
3448 target_siginfo_t info;
3450 info.si_signo = TARGET_SIGILL;
3452 info.si_code = TARGET_ILL_PRVREG;
3453 info._sifields._sigfault._addr = env->pc;
3454 queue_signal(env, info.si_signo, &info);
3457 static void do_signal(CPUTLGState *env, int signo, int sigcode)
3459 target_siginfo_t info;
3461 info.si_signo = signo;
3463 info._sifields._sigfault._addr = env->pc;
3465 if (signo == TARGET_SIGSEGV) {
3466 /* The passed in sigcode is a dummy; check for a page mapping
3467 and pass either MAPERR or ACCERR. */
3468 target_ulong addr = env->excaddr;
3469 info._sifields._sigfault._addr = addr;
3470 if (page_check_range(addr, 1, PAGE_VALID) < 0) {
3471 sigcode = TARGET_SEGV_MAPERR;
3473 sigcode = TARGET_SEGV_ACCERR;
3476 info.si_code = sigcode;
3478 queue_signal(env, info.si_signo, &info);
3481 static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
3483 env->excaddr = addr;
3484 do_signal(env, TARGET_SIGSEGV, 0);
3487 static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
3489 if (unlikely(reg >= TILEGX_R_COUNT)) {
3500 gen_sigill_reg(env);
3503 g_assert_not_reached();
3506 env->regs[reg] = val;
3510 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3511 * memory at the address held in the first source register. If the values are
3512 * not equal, then no memory operation is performed. If the values are equal,
3513 * the 8-byte quantity from the second source register is written into memory
3514 * at the address held in the first source register. In either case, the result
3515 * of the instruction is the value read from memory. The compare and write to
3516 * memory are atomic and thus can be used for synchronization purposes. This
3517 * instruction only operates on addresses aligned to an 8-byte boundary.
3518 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3520 * Functional Description (64-bit)
3521 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3522 * rf[Dest] = memVal;
3523 * if (memVal == SPR[CmpValueSPR])
3524 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3526 * Functional Description (32-bit)
3527 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3528 * rf[Dest] = memVal;
3529 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3530 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3533 * This function also handles exch and exch4, which do not use the CmpValue SPR.
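/* Worked example of the cmpexch semantics above: with
 * SPR[CmpValueSPR] = 0x10, memory at rf[SrcA] holding 0x10 and
 * rf[SrcB] = 0x99, the instruction stores 0x99 and returns the old
 * value 0x10 in rf[Dest]; had memory held any other value, it would
 * be left untouched and simply returned. */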
3535 static void do_exch(CPUTLGState *env, bool quad, bool cmp)
3538 target_long val, sprval;
3542 addr = env->atomic_srca;
3543 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3544 goto sigsegv_maperr;
3549 sprval = env->spregs[TILEGX_SPR_CMPEXCH];
3551 sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
3555 if (!cmp || val == sprval) {
3556 target_long valb = env->atomic_srcb;
3557 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3558 goto sigsegv_maperr;
3562 set_regval(env, env->atomic_dstr, val);
3568 gen_sigsegv_maperr(env, addr);
3571 static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
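/* Emulate the TILE-Gx fetch-and-op family: load the old value from
 * the address in atomic_srca, combine it with atomic_srcb (add, and,
 * or, or add-only-if-the-result-stays-non-negative for the addgez
 * forms), write the result back, and return the old value in the
 * destination register. */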
3575 target_long val, valb;
3579 addr = env->atomic_srca;
3580 valb = env->atomic_srcb;
3581 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3582 goto sigsegv_maperr;
3586 case TILEGX_EXCP_OPCODE_FETCHADD:
3587 case TILEGX_EXCP_OPCODE_FETCHADD4:
3590 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3596 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
3598 if ((int32_t)valb < 0) {
3602 case TILEGX_EXCP_OPCODE_FETCHAND:
3603 case TILEGX_EXCP_OPCODE_FETCHAND4:
3606 case TILEGX_EXCP_OPCODE_FETCHOR:
3607 case TILEGX_EXCP_OPCODE_FETCHOR4:
3611 g_assert_not_reached();
3615 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3616 goto sigsegv_maperr;
3620 set_regval(env, env->atomic_dstr, val);
3626 gen_sigsegv_maperr(env, addr);
3629 void cpu_loop(CPUTLGState *env)
3631 CPUState *cs = CPU(tilegx_env_get_cpu(env));
3636 trapnr = cpu_tilegx_exec(cs);
3639 case TILEGX_EXCP_SYSCALL:
3640 env->regs[TILEGX_R_RE] = do_syscall(env, env->regs[TILEGX_R_NR],
3641 env->regs[0], env->regs[1],
3642 env->regs[2], env->regs[3],
3643 env->regs[4], env->regs[5],
3644 env->regs[6], env->regs[7]);
3645 env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(env->regs[TILEGX_R_RE])
3646 ? - env->regs[TILEGX_R_RE]
3649 case TILEGX_EXCP_OPCODE_EXCH:
3650 do_exch(env, true, false);
3652 case TILEGX_EXCP_OPCODE_EXCH4:
3653 do_exch(env, false, false);
3655 case TILEGX_EXCP_OPCODE_CMPEXCH:
3656 do_exch(env, true, true);
3658 case TILEGX_EXCP_OPCODE_CMPEXCH4:
3659 do_exch(env, false, true);
3661 case TILEGX_EXCP_OPCODE_FETCHADD:
3662 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3663 case TILEGX_EXCP_OPCODE_FETCHAND:
3664 case TILEGX_EXCP_OPCODE_FETCHOR:
3665 do_fetch(env, trapnr, true);
3667 case TILEGX_EXCP_OPCODE_FETCHADD4:
3668 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
3669 case TILEGX_EXCP_OPCODE_FETCHAND4:
3670 case TILEGX_EXCP_OPCODE_FETCHOR4:
3671 do_fetch(env, trapnr, false);
3673 case TILEGX_EXCP_SIGNAL:
3674 do_signal(env, env->signo, env->sigcode);
3676 case TILEGX_EXCP_REG_IDN_ACCESS:
3677 case TILEGX_EXCP_REG_UDN_ACCESS:
3678 gen_sigill_reg(env);
3681 fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
3682 g_assert_not_reached();
3684 process_pending_signals(env);
3690 THREAD CPUState *thread_cpu;
3692 void task_settid(TaskState *ts)
3694 if (ts->ts_tid == 0) {
3695 ts->ts_tid = (pid_t)syscall(SYS_gettid);
3699 void stop_all_tasks(void)
3702 * We trust that when using NPTL, start_exclusive()
3703 * handles thread stopping correctly.
3708 /* Assumes contents are already zeroed. */
3709 void init_task_state(TaskState *ts)
3714 ts->first_free = ts->sigqueue_table;
3715 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
3716 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
3718 ts->sigqueue_table[i].next = NULL;
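/* The entries now form a NULL-terminated free list headed by first_free. */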
3721 CPUArchState *cpu_copy(CPUArchState *env)
3723 CPUState *cpu = ENV_GET_CPU(env);
3724 CPUState *new_cpu = cpu_init(cpu_model);
3725 CPUArchState *new_env = new_cpu->env_ptr;
3729 /* Reset non arch specific state */
3732 memcpy(new_env, env, sizeof(CPUArchState));
3734 /* Clone all break/watchpoints.
3735 Note: Once we support ptrace with hw-debug register access, make sure
3736 BP_CPU break/watchpoints are handled correctly on clone. */
3737 QTAILQ_INIT(&new_cpu->breakpoints);
3738 QTAILQ_INIT(&new_cpu->watchpoints);
3739 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
3740 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
3742 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
3743 cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
3749 static void handle_arg_help(const char *arg)
3751 usage(EXIT_SUCCESS);
3754 static void handle_arg_log(const char *arg)
3758 mask = qemu_str_to_log_mask(arg);
3760 qemu_print_log_usage(stdout);
3766 static void handle_arg_log_filename(const char *arg)
3768 qemu_set_log_filename(arg);
3771 static void handle_arg_set_env(const char *arg)
3773 char *r, *p, *token;
3774 r = p = strdup(arg);
3775 while ((token = strsep(&p, ",")) != NULL) {
3776 if (envlist_setenv(envlist, token) != 0) {
3777 usage(EXIT_FAILURE);
3783 static void handle_arg_unset_env(const char *arg)
3785 char *r, *p, *token;
3786 r = p = strdup(arg);
3787 while ((token = strsep(&p, ",")) != NULL) {
3788 if (envlist_unsetenv(envlist, token) != 0) {
3789 usage(EXIT_FAILURE);
3795 static void handle_arg_argv0(const char *arg)
3797 argv0 = strdup(arg);
3800 static void handle_arg_stack_size(const char *arg)
3803 guest_stack_size = strtoul(arg, &p, 0);
3804 if (guest_stack_size == 0) {
3805 usage(EXIT_FAILURE);
3809 guest_stack_size *= 1024 * 1024;
3810 } else if (*p == 'k' || *p == 'K') {
3811 guest_stack_size *= 1024;
3815 static void handle_arg_ld_prefix(const char *arg)
3817 interp_prefix = strdup(arg);
3820 static void handle_arg_pagesize(const char *arg)
3822 qemu_host_page_size = atoi(arg);
3823 if (qemu_host_page_size == 0 ||
3824 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
3825 fprintf(stderr, "page size must be a power of two\n");
3830 static void handle_arg_randseed(const char *arg)
3832 unsigned long long seed;
3834 if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
3835 fprintf(stderr, "Invalid seed number: %s\n", arg);
3841 static void handle_arg_gdb(const char *arg)
3843 gdbstub_port = atoi(arg);
3846 static void handle_arg_uname(const char *arg)
3848 qemu_uname_release = strdup(arg);
3851 static void handle_arg_cpu(const char *arg)
3853 cpu_model = strdup(arg);
3854 if (cpu_model == NULL || is_help_option(cpu_model)) {
3855 /* XXX: implement xxx_cpu_list for targets that still lack it */
3856 #if defined(cpu_list)
3857 cpu_list(stdout, &fprintf);
3863 static void handle_arg_guest_base(const char *arg)
3865 guest_base = strtol(arg, NULL, 0);
3866 have_guest_base = 1;
3869 static void handle_arg_reserved_va(const char *arg)
3873 reserved_va = strtoul(arg, &p, 0);
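/* An optional k/M/G suffix scales the value; the checks below reject
 * sizes that overflow or exceed the guest virtual address space. */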
3887 unsigned long unshifted = reserved_va;
3889 reserved_va <<= shift;
3890 if (((reserved_va >> shift) != unshifted)
3891 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3892 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
3895 fprintf(stderr, "Reserved virtual address too big\n");
3900 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
3905 static void handle_arg_singlestep(const char *arg)
3910 static void handle_arg_strace(const char *arg)
3915 static void handle_arg_version(const char *arg)
3917 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
3918 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3922 struct qemu_argument {
3926 void (*handle_opt)(const char *arg);
3927 const char *example;
3931 static const struct qemu_argument arg_table[] = {
3932 {"h", "", false, handle_arg_help,
3933 "", "print this help"},
3934 {"help", "", false, handle_arg_help,
3936 {"g", "QEMU_GDB", true, handle_arg_gdb,
3937 "port", "wait gdb connection to 'port'"},
3938 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
3939 "path", "set the elf interpreter prefix to 'path'"},
3940 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
3941 "size", "set the stack size to 'size' bytes"},
3942 {"cpu", "QEMU_CPU", true, handle_arg_cpu,
3943 "model", "select CPU (-cpu help for list)"},
3944 {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
3945 "var=value", "sets targets environment variable (see below)"},
3946 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
3947 "var", "unsets targets environment variable (see below)"},
3948 {"0", "QEMU_ARGV0", true, handle_arg_argv0,
3949 "argv0", "forces target process argv[0] to be 'argv0'"},
3950 {"r", "QEMU_UNAME", true, handle_arg_uname,
3951 "uname", "set qemu uname release string to 'uname'"},
3952 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
3953 "address", "set guest_base address to 'address'"},
3954 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
3955 "size", "reserve 'size' bytes for guest virtual address space"},
3956 {"d", "QEMU_LOG", true, handle_arg_log,
3957 "item[,...]", "enable logging of specified items "
3958 "(use '-d help' for a list of items)"},
3959 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
3960 "logfile", "write logs to 'logfile' (default stderr)"},
3961 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
3962 "pagesize", "set the host page size to 'pagesize'"},
3963 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
3964 "", "run in singlestep mode"},
3965 {"strace", "QEMU_STRACE", false, handle_arg_strace,
3966 "", "log system calls"},
3967 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed,
3968 "", "Seed for pseudo-random number generator"},
3969 {"version", "QEMU_VERSION", false, handle_arg_version,
3970 "", "display version information and exit"},
3971 {NULL, NULL, false, NULL, NULL, NULL}
3974 static void usage(int exitcode)
3976 const struct qemu_argument *arginfo;
3980 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
3981 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
3983 "Options and associated environment variables:\n"
3986 /* Calculate column widths. We must always have at least enough space
3987 * for the column header.
3989 maxarglen = strlen("Argument");
3990 maxenvlen = strlen("Env-variable");
3992 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3993 int arglen = strlen(arginfo->argv);
3994 if (arginfo->has_arg) {
3995 arglen += strlen(arginfo->example) + 1;
3997 if (strlen(arginfo->env) > maxenvlen) {
3998 maxenvlen = strlen(arginfo->env);
4000 if (arglen > maxarglen) {
4005 printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
4006 maxenvlen, "Env-variable");
4008 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4009 if (arginfo->has_arg) {
4010 printf("-%s %-*s %-*s %s\n", arginfo->argv,
4011 (int)(maxarglen - strlen(arginfo->argv) - 1),
4012 arginfo->example, maxenvlen, arginfo->env, arginfo->help);
4014 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
4015 maxenvlen, arginfo->env,
4022 "QEMU_LD_PREFIX = %s\n"
4023 "QEMU_STACK_SIZE = %ld byte\n",
4028 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4029 "QEMU_UNSET_ENV environment variables to set and unset\n"
4030 "environment variables for the target process.\n"
4031 "It is possible to provide several variables by separating them\n"
4032 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4033 "provide the -E and -U options multiple times.\n"
4034 "The following lines are equivalent:\n"
4035 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4036 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4037 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4038 "Note that if you provide several changes to a single variable\n"
4039 "the last change will stay in effect.\n");
4044 static int parse_args(int argc, char **argv)
4048 const struct qemu_argument *arginfo;
4050 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4051 if (arginfo->env == NULL) {
4055 r = getenv(arginfo->env);
4057 arginfo->handle_opt(r);
4063 if (optind >= argc) {
4072 if (!strcmp(r, "-")) {
4075 /* Treat --foo the same as -foo. */
4080 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4081 if (!strcmp(r, arginfo->argv)) {
4082 if (arginfo->has_arg) {
4083 if (optind >= argc) {
4084 (void) fprintf(stderr,
4085 "qemu: missing argument for option '%s'\n", r);
4088 arginfo->handle_opt(argv[optind]);
4091 arginfo->handle_opt(NULL);
4097 /* no option matched the current argv */
4098 if (arginfo->handle_opt == NULL) {
4099 (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
4104 if (optind >= argc) {
4105 (void) fprintf(stderr, "qemu: no user program specified\n");
4109 filename = argv[optind];
4110 exec_path = argv[optind];
4115 int main(int argc, char **argv, char **envp)
4117 struct target_pt_regs regs1, *regs = &regs1;
4118 struct image_info info1, *info = &info1;
4119 struct linux_binprm bprm;
4124 char **target_environ, **wrk;
4131 module_call_init(MODULE_INIT_QOM);
4133 if ((envlist = envlist_create()) == NULL) {
4134 (void) fprintf(stderr, "Unable to allocate envlist\n");
4138 /* add current environment into the list */
4139 for (wrk = environ; *wrk != NULL; wrk++) {
4140 (void) envlist_setenv(envlist, *wrk);
4143 /* Read the stack limit from the kernel. If it's "unlimited",
4144 then we can do little else besides use the default. */
4147 if (getrlimit(RLIMIT_STACK, &lim) == 0
4148 && lim.rlim_cur != RLIM_INFINITY
4149 && lim.rlim_cur == (target_long)lim.rlim_cur) {
4150 guest_stack_size = lim.rlim_cur;
4155 #if defined(cpudef_setup)
4156 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
4161 optind = parse_args(argc, argv);
4164 memset(regs, 0, sizeof(struct target_pt_regs));
4166 /* Zero out image_info */
4167 memset(info, 0, sizeof(struct image_info));
4169 memset(&bprm, 0, sizeof (bprm));
4171 /* Scan interp_prefix dir for replacement files. */
4172 init_paths(interp_prefix);
4174 init_qemu_uname_release();
4176 if (cpu_model == NULL) {
4177 #if defined(TARGET_I386)
4178 #ifdef TARGET_X86_64
4179 cpu_model = "qemu64";
4181 cpu_model = "qemu32";
4183 #elif defined(TARGET_ARM)
4185 #elif defined(TARGET_UNICORE32)
4187 #elif defined(TARGET_M68K)
4189 #elif defined(TARGET_SPARC)
4190 #ifdef TARGET_SPARC64
4191 cpu_model = "TI UltraSparc II";
4193 cpu_model = "Fujitsu MB86904";
4195 #elif defined(TARGET_MIPS)
4196 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4201 #elif defined TARGET_OPENRISC
4202 cpu_model = "or1200";
4203 #elif defined(TARGET_PPC)
4204 # ifdef TARGET_PPC64
4205 cpu_model = "POWER8";
4209 #elif defined TARGET_SH4
4210 cpu_model = TYPE_SH7785_CPU;
4216 /* NOTE: we need to init the CPU at this stage to get
4217 qemu_host_page_size */
4218 cpu = cpu_init(cpu_model);
4220 fprintf(stderr, "Unable to find CPU definition\n");
4228 if (getenv("QEMU_STRACE")) {
4232 if (getenv("QEMU_RAND_SEED")) {
4233 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4236 target_environ = envlist_to_environ(envlist, NULL);
4237 envlist_free(envlist);
4240 * Now that page sizes are configured in cpu_init() we can do
4241 * proper page alignment for guest_base.
4243 guest_base = HOST_PAGE_ALIGN(guest_base);
4245 if (reserved_va || have_guest_base) {
4246 guest_base = init_guest_space(guest_base, reserved_va, 0,
4248 if (guest_base == (unsigned long)-1) {
4249 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
4250 "space for use as guest address space (check your virtual "
4251 "memory ulimit setting or reserve less using -R option)\n",
4257 mmap_next_start = reserved_va;
4262 * Read in the mmap_min_addr kernel parameter. This value is used
4263 * when loading the ELF image to determine whether guest_base
4264 * is needed. It is also used in mmap_find_vma.
4269 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
4271 if (fscanf(fp, "%lu", &tmp) == 1) {
4272 mmap_min_addr = tmp;
4273 qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
4280 * Prepare copy of argv vector for target.
4282 target_argc = argc - optind;
4283 target_argv = calloc(target_argc + 1, sizeof (char *));
4284 if (target_argv == NULL) {
4285 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
4290 * If argv0 is specified (using '-0' switch) we replace
4291 * argv[0] pointer with the given one.
4294 if (argv0 != NULL) {
4295 target_argv[i++] = strdup(argv0);
4297 for (; i < target_argc; i++) {
4298 target_argv[i] = strdup(argv[optind + i]);
4300 target_argv[target_argc] = NULL;
4302 ts = g_new0(TaskState, 1);
4303 init_task_state(ts);
4304 /* build Task State */
4310 execfd = qemu_getauxval(AT_EXECFD);
4312 execfd = open(filename, O_RDONLY);
4314 printf("Error while loading %s: %s\n", filename, strerror(errno));
4315 _exit(EXIT_FAILURE);
4319 ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
4322 printf("Error while loading %s: %s\n", filename, strerror(-ret));
4323 _exit(EXIT_FAILURE);
4326 for (wrk = target_environ; *wrk; wrk++) {
4330 free(target_environ);
4332 if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
4333 qemu_log("guest_base 0x%lx\n", guest_base);
4336 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
4337 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
4338 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
4340 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
4342 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
4343 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
4345 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
4346 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
4349 target_set_brk(info->brk);
4353 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4354 generating the prologue until now so that the prologue can take
4355 the real value of GUEST_BASE into account. */
4356 tcg_prologue_init(&tcg_ctx);
4358 #if defined(TARGET_I386)
4359 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
4360 env->hflags |= HF_PE_MASK | HF_CPL_MASK;
4361 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4362 env->cr[4] |= CR4_OSFXSR_MASK;
4363 env->hflags |= HF_OSFXSR_MASK;
4365 #ifndef TARGET_ABI32
4366 /* enable 64 bit mode if possible */
4367 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
4368 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
4371 env->cr[4] |= CR4_PAE_MASK;
4372 env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
4373 env->hflags |= HF_LMA_MASK;
4376 /* flags setup: we enable IRQs by default, as in user mode */
4377 env->eflags |= IF_MASK;
4379 /* linux register setup */
4380 #ifndef TARGET_ABI32
4381 env->regs[R_EAX] = regs->rax;
4382 env->regs[R_EBX] = regs->rbx;
4383 env->regs[R_ECX] = regs->rcx;
4384 env->regs[R_EDX] = regs->rdx;
4385 env->regs[R_ESI] = regs->rsi;
4386 env->regs[R_EDI] = regs->rdi;
4387 env->regs[R_EBP] = regs->rbp;
4388 env->regs[R_ESP] = regs->rsp;
4389 env->eip = regs->rip;
4391 env->regs[R_EAX] = regs->eax;
4392 env->regs[R_EBX] = regs->ebx;
4393 env->regs[R_ECX] = regs->ecx;
4394 env->regs[R_EDX] = regs->edx;
4395 env->regs[R_ESI] = regs->esi;
4396 env->regs[R_EDI] = regs->edi;
4397 env->regs[R_EBP] = regs->ebp;
4398 env->regs[R_ESP] = regs->esp;
4399 env->eip = regs->eip;
4402 /* linux interrupt setup */
4403 #ifndef TARGET_ABI32
4404 env->idt.limit = 511;
4406 env->idt.limit = 255;
4408 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
4409 PROT_READ|PROT_WRITE,
4410 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4411 idt_table = g2h(env->idt.base);
4434 /* linux segment setup */
4436 uint64_t *gdt_table;
4437 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
4438 PROT_READ|PROT_WRITE,
4439 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4440 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
4441 gdt_table = g2h(env->gdt.base);
4443 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4444 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4445 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4447 /* 64 bit code segment */
4448 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4449 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4451 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4453 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
4454 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4455 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
4457 cpu_x86_load_seg(env, R_CS, __USER_CS);
4458 cpu_x86_load_seg(env, R_SS, __USER_DS);
4460 cpu_x86_load_seg(env, R_DS, __USER_DS);
4461 cpu_x86_load_seg(env, R_ES, __USER_DS);
4462 cpu_x86_load_seg(env, R_FS, __USER_DS);
4463 cpu_x86_load_seg(env, R_GS, __USER_DS);
4464 /* This hack makes Wine work... */
4465 env->segs[R_FS].selector = 0;
4467 cpu_x86_load_seg(env, R_DS, 0);
4468 cpu_x86_load_seg(env, R_ES, 0);
4469 cpu_x86_load_seg(env, R_FS, 0);
4470 cpu_x86_load_seg(env, R_GS, 0);
4472 #elif defined(TARGET_AARCH64)
4476 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
4478 "The selected ARM CPU does not support 64 bit mode\n");
4482 for (i = 0; i < 31; i++) {
4483 env->xregs[i] = regs->regs[i];
4486 env->xregs[31] = regs->sp;
4488 #elif defined(TARGET_ARM)
4491 cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
4493 for (i = 0; i < 16; i++) {
4494 env->regs[i] = regs->uregs[i];
4496 #ifdef TARGET_WORDS_BIGENDIAN
4498 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
4499 && (info->elf_flags & EF_ARM_BE8)) {
4500 env->uncached_cpsr |= CPSR_E;
4501 env->cp15.sctlr_el[1] |= SCTLR_E0E;
4503 env->cp15.sctlr_el[1] |= SCTLR_B;
4507 #elif defined(TARGET_UNICORE32)
4510 cpu_asr_write(env, regs->uregs[32], 0xffffffff);
4511 for (i = 0; i < 32; i++) {
4512 env->regs[i] = regs->uregs[i];
4515 #elif defined(TARGET_SPARC)
4519 env->npc = regs->npc;
4521 for (i = 0; i < 8; i++)
4522 env->gregs[i] = regs->u_regs[i];
4523 for (i = 0; i < 8; i++)
4524 env->regwptr[i] = regs->u_regs[i + 8];
4526 #elif defined(TARGET_PPC)
4530 #if defined(TARGET_PPC64)
4531 #if defined(TARGET_ABI32)
4532 env->msr &= ~((target_ulong)1 << MSR_SF);
4534 env->msr |= (target_ulong)1 << MSR_SF;
4537 env->nip = regs->nip;
4538 for (i = 0; i < 32; i++) {
4539 env->gpr[i] = regs->gpr[i];
4542 #elif defined(TARGET_M68K)
4545 env->dregs[0] = regs->d0;
4546 env->dregs[1] = regs->d1;
4547 env->dregs[2] = regs->d2;
4548 env->dregs[3] = regs->d3;
4549 env->dregs[4] = regs->d4;
4550 env->dregs[5] = regs->d5;
4551 env->dregs[6] = regs->d6;
4552 env->dregs[7] = regs->d7;
4553 env->aregs[0] = regs->a0;
4554 env->aregs[1] = regs->a1;
4555 env->aregs[2] = regs->a2;
4556 env->aregs[3] = regs->a3;
4557 env->aregs[4] = regs->a4;
4558 env->aregs[5] = regs->a5;
4559 env->aregs[6] = regs->a6;
4560 env->aregs[7] = regs->usp;
4562 ts->sim_syscalls = 1;
4564 #elif defined(TARGET_MICROBLAZE)
4566 env->regs[0] = regs->r0;
4567 env->regs[1] = regs->r1;
4568 env->regs[2] = regs->r2;
4569 env->regs[3] = regs->r3;
4570 env->regs[4] = regs->r4;
4571 env->regs[5] = regs->r5;
4572 env->regs[6] = regs->r6;
4573 env->regs[7] = regs->r7;
4574 env->regs[8] = regs->r8;
4575 env->regs[9] = regs->r9;
4576 env->regs[10] = regs->r10;
4577 env->regs[11] = regs->r11;
4578 env->regs[12] = regs->r12;
4579 env->regs[13] = regs->r13;
4580 env->regs[14] = regs->r14;
4581 env->regs[15] = regs->r15;
4582 env->regs[16] = regs->r16;
4583 env->regs[17] = regs->r17;
4584 env->regs[18] = regs->r18;
4585 env->regs[19] = regs->r19;
4586 env->regs[20] = regs->r20;
4587 env->regs[21] = regs->r21;
4588 env->regs[22] = regs->r22;
4589 env->regs[23] = regs->r23;
4590 env->regs[24] = regs->r24;
4591 env->regs[25] = regs->r25;
4592 env->regs[26] = regs->r26;
4593 env->regs[27] = regs->r27;
4594 env->regs[28] = regs->r28;
4595 env->regs[29] = regs->r29;
4596 env->regs[30] = regs->r30;
4597 env->regs[31] = regs->r31;
4598 env->sregs[SR_PC] = regs->pc;
4600 #elif defined(TARGET_MIPS)
4604 for (i = 0; i < 32; i++) {
4605 env->active_tc.gpr[i] = regs->regs[i];
4607 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
4608 if (regs->cp0_epc & 1) {
4609 env->hflags |= MIPS_HFLAG_M16;
4612 #elif defined(TARGET_OPENRISC)
4616 for (i = 0; i < 32; i++) {
4617 env->gpr[i] = regs->gpr[i];
4623 #elif defined(TARGET_SH4)
4627 for (i = 0; i < 16; i++) {
4628 env->gregs[i] = regs->regs[i];
4632 #elif defined(TARGET_ALPHA)
4636 for (i = 0; i < 28; i++) {
4637 env->ir[i] = ((abi_ulong *)regs)[i];
4639 env->ir[IR_SP] = regs->usp;
4642 #elif defined(TARGET_CRIS)
4644 env->regs[0] = regs->r0;
4645 env->regs[1] = regs->r1;
4646 env->regs[2] = regs->r2;
4647 env->regs[3] = regs->r3;
4648 env->regs[4] = regs->r4;
4649 env->regs[5] = regs->r5;
4650 env->regs[6] = regs->r6;
4651 env->regs[7] = regs->r7;
4652 env->regs[8] = regs->r8;
4653 env->regs[9] = regs->r9;
4654 env->regs[10] = regs->r10;
4655 env->regs[11] = regs->r11;
4656 env->regs[12] = regs->r12;
4657 env->regs[13] = regs->r13;
4658 env->regs[14] = info->start_stack;
4659 env->regs[15] = regs->acr;
4660 env->pc = regs->erp;
4662 #elif defined(TARGET_S390X)
4665 for (i = 0; i < 16; i++) {
4666 env->regs[i] = regs->gprs[i];
4668 env->psw.mask = regs->psw.mask;
4669 env->psw.addr = regs->psw.addr;
4671 #elif defined(TARGET_TILEGX)
4674 for (i = 0; i < TILEGX_R_COUNT; i++) {
4675 env->regs[i] = regs->regs[i];
4677 for (i = 0; i < TILEGX_SPR_COUNT; i++) {
4683 #error unsupported target CPU
4686 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4687 ts->stack_base = info->start_stack;
4688 ts->heap_base = info->brk;
4689 /* This will be filled in on the first SYS_HEAPINFO call. */
4694 if (gdbserver_start(gdbstub_port) < 0) {
4695 fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
4699 gdb_handlesig(cpu, 0);