4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
/*
 * NOTE(review): this chunk is a line-sampled extract; every line carries a
 * stray numeric prefix and intermediate lines (braces, struct openers) are
 * missing.  Comments below describe only what the visible text shows.
 */
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
/* NOTE(review): the "struct DisasContext {" opener appears to be missing;
   the members below (tb, insn, singlestep_enabled) look like its fields. */
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
59 bool singlestep_enabled;
62 /* Information carried about a condition to be evaluated. */
/* NOTE(review): anonymous union members used later via c->u.s64 / c->u.s32
   in disas_jcc; the enclosing struct/union lines were dropped. */
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
/* Per-cc_op hit/miss counters for the inline-branch statistics, only
   compiled in when DEBUG_INLINE_BRANCHES is defined. */
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
/* Compute the link-register value a branch-and-save style insn stores:
   when not in 64-bit mode (FLAG_MASK_64 clear) but in 31-bit mode
   (FLAG_MASK_32 set), the return address has the high bit or'ed in.
   NOTE(review): the closing braces and the 24-bit/64-bit return paths
   appear to have been dropped from this extract — confirm upstream. */
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
/* Dump CPU state for debugging: PSW (with symbolic cc name when
   S390X_DEBUG_DISAS is set, raw cc otherwise), the 16 GPRs, 16 FPRs,
   32 vector registers, control registers (system emulation only), and
   the inline-branch statistics when DEBUG_INLINE_BRANCHES is enabled. */
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
109 cpu_fprintf(f, "\n");
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
118 cpu_fprintf(f, "\n");
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? " " : "\n");
130 #ifndef CONFIG_USER_ONLY
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
134 cpu_fprintf(f, "\n");
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i = 0; i < CC_OP_MAX; i++) {
/* NOTE(review): the counters are uint64_t (see their declarations above)
   but printed with %10ld — should use "%10" PRIu64 for portability. */
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
148 cpu_fprintf(f, "\n");
/* TCG globals mirroring fields of CPUS390XState: the PSW, the PER
   breaking-event address (gbea), and the four cc computation inputs. */
151 static TCGv_i64 psw_addr;
152 static TCGv_i64 psw_mask;
153 static TCGv_i64 gbea;
155 static TCGv_i32 cc_op;
156 static TCGv_i64 cc_src;
157 static TCGv_i64 cc_dst;
158 static TCGv_i64 cc_vr;
/* Backing storage for the names of the 16 GPR + 16 FPR TCG globals. */
160 static char cpu_reg_names[32][4];
161 static TCGv_i64 regs[16];
162 static TCGv_i64 fregs[16];
164 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* One-time allocation of all TCG global variables used by the translator.
   Note the FPRs alias the low half of the vector registers
   (vregs[i][0].d), so only one copy of the state exists. */
166 void s390x_translate_init(void)
170 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
171 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
172 offsetof(CPUS390XState, psw.addr),
174 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
175 offsetof(CPUS390XState, psw.mask),
177 gbea = tcg_global_mem_new_i64(TCG_AREG0,
178 offsetof(CPUS390XState, gbea),
181 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
183 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
185 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
187 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
190 for (i = 0; i < 16; i++) {
191 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
192 regs[i] = tcg_global_mem_new(TCG_AREG0,
193 offsetof(CPUS390XState, regs[i]),
197 for (i = 0; i < 16; i++) {
198 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
199 fregs[i] = tcg_global_mem_new(TCG_AREG0,
200 offsetof(CPUS390XState, vregs[i][0].d),
201 cpu_reg_names[i + 16]);
/* Return a fresh temp holding GPR `reg`.  Caller frees the temp. */
205 static TCGv_i64 load_reg(int reg)
207 TCGv_i64 r = tcg_temp_new_i64();
208 tcg_gen_mov_i64(r, regs[reg]);
/* Return a fresh temp with the 32-bit float value of FPR `reg`
   (stored in the high half of the 64-bit register, hence the shift). */
212 static TCGv_i64 load_freg32_i64(int reg)
214 TCGv_i64 r = tcg_temp_new_i64();
215 tcg_gen_shri_i64(r, fregs[reg], 32);
/* Store a full 64-bit value into GPR `reg`. */
219 static void store_reg(int reg, TCGv_i64 v)
221 tcg_gen_mov_i64(regs[reg], v);
/* Store a full 64-bit value into FPR `reg`. */
224 static void store_freg(int reg, TCGv_i64 v)
226 tcg_gen_mov_i64(fregs[reg], v);
229 static void store_reg32_i64(int reg, TCGv_i64 v)
231 /* 32 bit register writes keep the upper half */
232 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Write only the high 32 bits of GPR `reg`, keeping the low half. */
235 static void store_reg32h_i64(int reg, TCGv_i64 v)
237 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Write a 32-bit float into the high half of FPR `reg`. */
240 static void store_freg32_i64(int reg, TCGv_i64 v)
242 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
245 static void return_low128(TCGv_i64 dest)
247 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Synchronize the PSW address global with the translator's current pc. */
250 static void update_psw_addr(DisasContext *s)
253 tcg_gen_movi_i64(psw_addr, s->pc);
/* Record a (taken) branch for PER: update the breaking-event address and,
   if PER is active in this TB, call the per_branch helper.  `to_next`
   selects s->next_pc as the destination instead of psw_addr.
   System emulation only; a no-op stub presumably exists for user mode. */
256 static void per_branch(DisasContext *s, bool to_next)
258 #ifndef CONFIG_USER_ONLY
259 tcg_gen_movi_i64(gbea, s->pc);
261 if (s->tb->flags & FLAG_MASK_PER) {
262 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
263 gen_helper_per_branch(cpu_env, gbea, next_pc);
265 tcg_temp_free_i64(next_pc);
/* As per_branch, but for a conditional branch: with PER active, branch
   around the helper when the condition is false; without PER, update
   gbea via movcond so it is only changed when the branch is taken. */
271 static void per_branch_cond(DisasContext *s, TCGCond cond,
272 TCGv_i64 arg1, TCGv_i64 arg2)
274 #ifndef CONFIG_USER_ONLY
275 if (s->tb->flags & FLAG_MASK_PER) {
276 TCGLabel *lab = gen_new_label();
277 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
279 tcg_gen_movi_i64(gbea, s->pc);
280 gen_helper_per_branch(cpu_env, gbea, psw_addr);
284 TCGv_i64 pc = tcg_const_i64(s->pc);
285 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
286 tcg_temp_free_i64(pc);
/* Unconditionally record the current pc as the breaking-event address. */
291 static void per_breaking_event(DisasContext *s)
293 tcg_gen_movi_i64(gbea, s->pc);
/* Flush the compile-time cc_op into the cc_op global, unless it is
   already dynamic/static (i.e. already in sync with the env). */
296 static void update_cc_op(DisasContext *s)
298 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
299 tcg_gen_movi_i32(cc_op, s->cc_op);
/* NOTE(review): body of potential_page_fault was dropped from this extract. */
303 static void potential_page_fault(DisasContext *s)
/* Fetch 2 instruction bytes at pc, zero-extended to 64 bits. */
309 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
311 return (uint64_t)cpu_lduw_code(env, pc);
/* Fetch 4 instruction bytes at pc, zero-extended to 64 bits. */
314 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
316 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
/* Map the PSW address-space-control bits of the TB flags to the MMU
   index to use for data accesses. */
319 static int get_mem_index(DisasContext *s)
321 switch (s->tb->flags & FLAG_MASK_ASC) {
322 case PSW_ASC_PRIMARY >> 32:
324 case PSW_ASC_SECONDARY >> 32:
326 case PSW_ASC_HOME >> 32:
/* Raise exception `excp` via the generic exception helper. */
334 static void gen_exception(int excp)
336 TCGv_i32 tmp = tcg_const_i32(excp);
337 gen_helper_exception(cpu_env, tmp);
338 tcg_temp_free_i32(tmp);
/* Raise a program exception with code `code`: record the code and the
   instruction length (next_pc - pc) in the env, then trap via EXCP_PGM. */
341 static void gen_program_exception(DisasContext *s, int code)
345 /* Remember what pgm exception this was. */
346 tmp = tcg_const_i32(code);
347 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
348 tcg_temp_free_i32(tmp);
350 tmp = tcg_const_i32(s->next_pc - s->pc);
351 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
352 tcg_temp_free_i32(tmp);
354 /* Advance past instruction. */
361 /* Trigger exception. */
362 gen_exception(EXCP_PGM);
/* Convenience wrapper: operation (illegal opcode) program exception. */
365 static inline void gen_illegal_opcode(DisasContext *s)
367 gen_program_exception(s, PGM_OPERATION);
/* Raise a DATA program exception with the data-exception code (DXC)
   field of the FPC set to 0xff. */
370 static inline void gen_trap(DisasContext *s)
374 /* Set DXC to 0xff. */
375 t = tcg_temp_new_i32();
376 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
377 tcg_gen_ori_i32(t, t, 0xff00);
378 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
379 tcg_temp_free_i32(t);
381 gen_program_exception(s, PGM_DATA);
/* Raise a privileged-operation exception if the PSW says problem state. */
384 #ifndef CONFIG_USER_ONLY
385 static void check_privileged(DisasContext *s)
387 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
388 gen_program_exception(s, PGM_PRIVILEGED);
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a fresh temp; in 31-bit mode the result is masked to 31 bits. */
393 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
395 TCGv_i64 tmp = tcg_temp_new_i64();
396 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
398 /* Note that d2 is limited to 20 bits, signed. If we crop negative
399 displacements early we create larger immediate addends. */
401 /* Note that addi optimizes the imm==0 case. */
403 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
404 tcg_gen_addi_i64(tmp, tmp, d2);
406 tcg_gen_addi_i64(tmp, regs[b2], d2);
408 tcg_gen_addi_i64(tmp, regs[x2], d2);
414 tcg_gen_movi_i64(tmp, d2);
417 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
/* True if cc_src/cc_dst/cc_vr currently hold data that a later
   cc computation would consume. */
423 static inline bool live_cc_data(DisasContext *s)
425 return (s->cc_op != CC_OP_DYNAMIC
426 && s->cc_op != CC_OP_STATIC
/* Set the cc to the constant `val`; dead cc inputs are discarded so the
   TCG optimizer can drop their producers. */
430 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
432 if (live_cc_data(s)) {
433 tcg_gen_discard_i64(cc_src);
434 tcg_gen_discard_i64(cc_dst);
435 tcg_gen_discard_i64(cc_vr);
437 s->cc_op = CC_OP_CONST0 + val;
/* Record a cc computation taking one input (dst). */
440 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
442 if (live_cc_data(s)) {
443 tcg_gen_discard_i64(cc_src);
444 tcg_gen_discard_i64(cc_vr);
446 tcg_gen_mov_i64(cc_dst, dst);
/* Record a cc computation taking two inputs (src, dst). */
450 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
453 if (live_cc_data(s)) {
454 tcg_gen_discard_i64(cc_vr);
456 tcg_gen_mov_i64(cc_src, src);
457 tcg_gen_mov_i64(cc_dst, dst);
/* Record a cc computation taking three inputs (src, dst, vr). */
461 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
462 TCGv_i64 dst, TCGv_i64 vr)
464 tcg_gen_mov_i64(cc_src, src);
465 tcg_gen_mov_i64(cc_dst, dst);
466 tcg_gen_mov_i64(cc_vr, vr);
/* Shorthand cc setters for the common one-operand comparisons. */
470 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
472 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
475 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
477 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
480 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
482 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
485 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
487 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
490 /* CC value is in env->cc_op */
491 static void set_cc_static(DisasContext *s)
493 if (live_cc_data(s)) {
494 tcg_gen_discard_i64(cc_src);
495 tcg_gen_discard_i64(cc_dst);
496 tcg_gen_discard_i64(cc_vr);
498 s->cc_op = CC_OP_STATIC;
501 /* calculates cc into cc_op */
502 static void gen_op_calc_cc(DisasContext *s)
504 TCGv_i32 local_cc_op;
507 TCGV_UNUSED_I32(local_cc_op);
508 TCGV_UNUSED_I64(dummy);
511 dummy = tcg_const_i64(0);
525 local_cc_op = tcg_const_i32(s->cc_op);
541 /* s->cc_op is the cc value */
542 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
545 /* env->cc_op already is the cc value */
/* One-argument cc ops: only cc_dst matters; pad with dummy. */
560 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
565 case CC_OP_LTUGTU_32:
566 case CC_OP_LTUGTU_64:
/* Two-argument cc ops: cc_src and cc_dst. */
573 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
/* Three-argument cc ops: cc_src, cc_dst and cc_vr. */
588 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
591 /* unknown operation - assume 3 arguments and cc_op in env */
592 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
598 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
599 tcg_temp_free_i32(local_cc_op);
601 if (!TCGV_IS_UNUSED_I64(dummy)) {
602 tcg_temp_free_i64(dummy);
605 /* We now have cc in cc_op as constant */
/* Decide whether a direct tb-link (goto_tb) to `dest` is permissible:
   the destination must be on the TB's own page (either its start page
   or the current insn's page), and single-stepping, CF_LAST_IO and
   PER tracing all forbid chaining. */
609 static int use_goto_tb(DisasContext *s, uint64_t dest)
611 /* NOTE: we handle the case where the TB spans two pages here */
612 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
613 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
614 && !s->singlestep_enabled
615 && !(s->tb->cflags & CF_LAST_IO)
616 && !(s->tb->flags & FLAG_MASK_PER));
/* Statistics hooks, compiled to no-ops unless DEBUG_INLINE_BRANCHES. */
619 static void account_noninline_branch(DisasContext *s, int cc_op)
621 #ifdef DEBUG_INLINE_BRANCHES
622 inline_branch_miss[cc_op]++;
626 static void account_inline_branch(DisasContext *s, int cc_op)
628 #ifdef DEBUG_INLINE_BRANCHES
629 inline_branch_hit[cc_op]++;
633 /* Table of mask values to comparison codes, given a comparison as input.
634 For such, CC=3 should not be possible. */
635 static const TCGCond ltgt_cond[16] = {
636 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
637 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
638 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
639 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
640 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
641 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
642 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
643 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
646 /* Table of mask values to comparison codes, given a logic op as input.
647 For such, only CC=0 and CC=1 should be possible. */
648 static const TCGCond nz_cond[16] = {
649 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
650 TCG_COND_NEVER, TCG_COND_NEVER,
651 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
652 TCG_COND_NE, TCG_COND_NE,
653 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
654 TCG_COND_EQ, TCG_COND_EQ,
655 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
656 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
659 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
660 details required to generate a TCG comparison. */
661 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
664 enum cc_op old_cc_op = s->cc_op;
/* mask 0/15 means branch-never/branch-always; no comparison needed. */
666 if (mask == 15 || mask == 0) {
667 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
670 c->g1 = c->g2 = true;
675 /* Find the TCG condition for the mask + cc op. */
/* Signed compare cc ops use the ltgt table directly... */
681 cond = ltgt_cond[mask];
682 if (cond == TCG_COND_NEVER) {
685 account_inline_branch(s, old_cc_op);
688 case CC_OP_LTUGTU_32:
689 case CC_OP_LTUGTU_64:
/* ...unsigned compares use its unsigned counterpart. */
690 cond = tcg_unsigned_cond(ltgt_cond[mask]);
691 if (cond == TCG_COND_NEVER) {
694 account_inline_branch(s, old_cc_op);
698 cond = nz_cond[mask];
699 if (cond == TCG_COND_NEVER) {
702 account_inline_branch(s, old_cc_op);
717 account_inline_branch(s, old_cc_op);
732 account_inline_branch(s, old_cc_op);
736 switch (mask & 0xa) {
737 case 8: /* src == 0 -> no one bit found */
740 case 2: /* src != 0 -> one bit found */
746 account_inline_branch(s, old_cc_op);
752 case 8 | 2: /* vr == 0 */
755 case 4 | 1: /* vr != 0 */
758 case 8 | 4: /* no carry -> vr >= src */
761 case 2 | 1: /* carry -> vr < src */
767 account_inline_branch(s, old_cc_op);
772 /* Note that CC=0 is impossible; treat it as dont-care. */
774 case 2: /* zero -> op1 == op2 */
777 case 4 | 1: /* !zero -> op1 != op2 */
780 case 4: /* borrow (!carry) -> op1 < op2 */
783 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
789 account_inline_branch(s, old_cc_op);
794 /* Calculate cc value. */
799 /* Jump based on CC. We'll load up the real cond below;
800 the assignment here merely avoids a compiler warning. */
801 account_noninline_branch(s, old_cc_op);
802 old_cc_op = CC_OP_STATIC;
803 cond = TCG_COND_NEVER;
807 /* Load up the arguments of the comparison. */
809 c->g1 = c->g2 = false;
813 c->u.s32.a = tcg_temp_new_i32();
814 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
815 c->u.s32.b = tcg_const_i32(0);
818 case CC_OP_LTUGTU_32:
821 c->u.s32.a = tcg_temp_new_i32();
822 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
823 c->u.s32.b = tcg_temp_new_i32();
824 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
831 c->u.s64.b = tcg_const_i64(0);
835 case CC_OP_LTUGTU_64:
/* 64-bit compares can reference cc_src/cc_dst globals directly. */
839 c->g1 = c->g2 = true;
845 c->u.s64.a = tcg_temp_new_i64();
846 c->u.s64.b = tcg_const_i64(0);
847 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
852 c->u.s32.a = tcg_temp_new_i32();
853 c->u.s32.b = tcg_temp_new_i32();
854 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
855 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
856 tcg_gen_movi_i32(c->u.s32.b, 0);
858 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
865 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
866 c->u.s64.b = tcg_const_i64(0);
/* CC_OP_STATIC: compare the materialized cc value against the mask. */
878 case 0x8 | 0x4 | 0x2: /* cc != 3 */
880 c->u.s32.b = tcg_const_i32(3);
882 case 0x8 | 0x4 | 0x1: /* cc != 2 */
884 c->u.s32.b = tcg_const_i32(2);
886 case 0x8 | 0x2 | 0x1: /* cc != 1 */
888 c->u.s32.b = tcg_const_i32(1);
890 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
893 c->u.s32.a = tcg_temp_new_i32();
894 c->u.s32.b = tcg_const_i32(0);
895 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
897 case 0x8 | 0x4: /* cc < 2 */
899 c->u.s32.b = tcg_const_i32(2);
901 case 0x8: /* cc == 0 */
903 c->u.s32.b = tcg_const_i32(0);
905 case 0x4 | 0x2 | 0x1: /* cc != 0 */
907 c->u.s32.b = tcg_const_i32(0);
909 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
912 c->u.s32.a = tcg_temp_new_i32();
913 c->u.s32.b = tcg_const_i32(0);
914 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
916 case 0x4: /* cc == 1 */
918 c->u.s32.b = tcg_const_i32(1);
920 case 0x2 | 0x1: /* cc > 1 */
922 c->u.s32.b = tcg_const_i32(1);
924 case 0x2: /* cc == 2 */
926 c->u.s32.b = tcg_const_i32(2);
928 case 0x1: /* cc == 3 */
930 c->u.s32.b = tcg_const_i32(3);
933 /* CC is masked by something else: (8 >> cc) & mask. */
936 c->u.s32.a = tcg_const_i32(8);
937 c->u.s32.b = tcg_const_i32(0);
938 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
939 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
/* Release the comparison temps; globals (g1/g2 set) are not freed. */
950 static void free_compare(DisasCompare *c)
954 tcg_temp_free_i64(c->u.s64.a);
956 tcg_temp_free_i32(c->u.s32.a);
961 tcg_temp_free_i64(c->u.s64.b);
963 tcg_temp_free_i32(c->u.s32.b);
968 /* ====================================================================== */
969 /* Define the insn format enumeration. */
970 #define F0(N) FMT_##N,
971 #define F1(N, X1) F0(N)
972 #define F2(N, X1, X2) F0(N)
973 #define F3(N, X1, X2, X3) F0(N)
974 #define F4(N, X1, X2, X3, X4) F0(N)
975 #define F5(N, X1, X2, X3, X4, X5) F0(N)
978 #include "insn-format.def"
988 /* Define a structure to hold the decoded fields. We'll store each inside
989 an array indexed by an enum. In order to conserve memory, we'll arrange
990 for fields that do not exist at the same time to overlap, thus the "C"
991 for compact. For checking purposes there is an "O" for original index
992 as well that will be applied to availability bitmaps. */
994 enum DisasFieldIndexO {
1017 enum DisasFieldIndexC {
1048 struct DisasFields {
/* Bitmaps of which compact/original field indices are populated. */
1052 unsigned presentC:16;
1053 unsigned int presentO;
1057 /* This is the way fields are to be accessed out of DisasFields. */
1058 #define have_field(S, F) have_field1((S), FLD_O_##F)
1059 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* True if original-index field `c` was decoded for this insn. */
1061 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1063 return (f->presentO >> c) & 1;
/* Fetch compact field `c`, asserting its original index `o` is present. */
1066 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1067 enum DisasFieldIndexC c)
1069 assert(have_field1(f, o));
1073 /* Describe the layout of each field in each format. */
1074 typedef struct DisasField {
1076 unsigned int size:8;
1077 unsigned int type:2;
1078 unsigned int indexC:6;
1079 enum DisasFieldIndexO indexO:8;
1082 typedef struct DisasFormatInfo {
1083 DisasField op[NUM_C_FIELD];
/* Field extractors: bit position, width, extraction type, indices. */
1086 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1087 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1088 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1089 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1090 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1091 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1092 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1093 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1094 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1095 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1096 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1097 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1098 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1099 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1101 #define F0(N) { { } },
1102 #define F1(N, X1) { { X1 } },
1103 #define F2(N, X1, X2) { { X1, X2 } },
1104 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1105 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1106 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1108 static const DisasFormatInfo format_info[] = {
1109 #include "insn-format.def"
1127 /* Generally, we'll extract operands into this structures, operate upon
1128 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1129 of routines below for more details. */
1131 bool g_out, g_out2, g_in1, g_in2;
1132 TCGv_i64 out, out2, in1, in2;
1136 /* Instructions can place constraints on their operands, raising specification
1137 exceptions if they are violated. To make this easy to automate, each "in1",
1138 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1139 of the following, or 0. To make this easy to document, we'll put the
1140 SPEC_<name> defines next to <name>. */
1142 #define SPEC_r1_even 1
1143 #define SPEC_r2_even 2
1144 #define SPEC_r3_even 4
1145 #define SPEC_r1_f128 8
1146 #define SPEC_r2_f128 16
1148 /* Return values from translate_one, indicating the state of the TB. */
1150 /* Continue the TB. */
1152 /* We have emitted one or more goto_tb. No fixup required. */
1154 /* We are not using a goto_tb (for whatever reason), but have updated
1155 the PC (for whatever reason), so there's no need to do it again on
1158 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1159 updated the PC for the next instruction to be executed. */
1161 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1162 No following code will be executed. */
/* Facility bits an insn may require before it is accepted. */
1166 typedef enum DisasFacility {
1167 FAC_Z, /* zarch (default) */
1168 FAC_CASS, /* compare and swap and store */
1169 FAC_CASS2, /* compare and swap and store 2 */
1170 FAC_DFP, /* decimal floating point */
1171 FAC_DFPR, /* decimal floating point rounding */
1172 FAC_DO, /* distinct operands */
1173 FAC_EE, /* execute extensions */
1174 FAC_EI, /* extended immediate */
1175 FAC_FPE, /* floating point extension */
1176 FAC_FPSSH, /* floating point support sign handling */
1177 FAC_FPRGR, /* FPR-GR transfer */
1178 FAC_GIE, /* general instructions extension */
1179 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1180 FAC_HW, /* high-word */
1181 FAC_IEEEE_SIM, /* IEEE exception simulation */
1182 FAC_MIE, /* miscellaneous-instruction-extensions */
1183 FAC_LAT, /* load-and-trap */
1184 FAC_LOC, /* load/store on condition */
1185 FAC_LD, /* long displacement */
1186 FAC_PC, /* population count */
1187 FAC_SCF, /* store clock fast */
1188 FAC_SFLE, /* store facility list extended */
1189 FAC_ILA, /* interlocked access facility 1 */
/* Per-insn descriptor: facility, operand helpers and the op emitter. */
1195 DisasFacility fac:8;
1200 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1201 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1202 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1203 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1204 void (*help_cout)(DisasContext *, DisasOps *);
1205 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1210 /* ====================================================================== */
1211 /* Miscellaneous helpers, used by several operations. */
/* Load the shift amount for a shift insn into o->in2: either the
   immediate displacement (masked) or base+displacement masked at
   runtime when a base register is involved. */
1213 static void help_l2_shift(DisasContext *s, DisasFields *f,
1214 DisasOps *o, int mask)
1216 int b2 = get_field(f, b2);
1217 int d2 = get_field(f, d2);
1220 o->in2 = tcg_const_i64(d2 & mask);
1222 o->in2 = get_address(s, 0, b2, d2);
1223 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Emit an unconditional branch to `dest`: a PER-only notification when
   branching to the next insn, a chained goto_tb when permitted, or a
   plain PSW update otherwise. */
1227 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1229 if (dest == s->next_pc) {
1230 per_branch(s, true);
1233 if (use_goto_tb(s, dest)) {
1235 per_breaking_event(s);
1237 tcg_gen_movi_i64(psw_addr, dest);
1238 tcg_gen_exit_tb((uintptr_t)s->tb);
1239 return EXIT_GOTO_TB;
1241 tcg_gen_movi_i64(psw_addr, dest);
1242 per_branch(s, false);
1243 return EXIT_PC_UPDATED;
/* Emit a (possibly conditional) branch.  `is_imm` selects a pc-relative
   immediate target (s->pc + 2*imm) versus the register target `cdest`.
   Three code shapes, from best to worst: both exits chained via goto_tb;
   only the fallthrough chained; neither chained (movcond on psw_addr). */
1247 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1248 bool is_imm, int imm, TCGv_i64 cdest)
1251 uint64_t dest = s->pc + 2 * imm;
1254 /* Take care of the special cases first. */
1255 if (c->cond == TCG_COND_NEVER) {
1260 if (dest == s->next_pc) {
1261 /* Branch to next. */
1262 per_branch(s, true);
1266 if (c->cond == TCG_COND_ALWAYS) {
1267 ret = help_goto_direct(s, dest);
1271 if (TCGV_IS_UNUSED_I64(cdest)) {
1272 /* E.g. bcr %r0 -> no branch. */
1276 if (c->cond == TCG_COND_ALWAYS) {
1277 tcg_gen_mov_i64(psw_addr, cdest);
1278 per_branch(s, false);
1279 ret = EXIT_PC_UPDATED;
1284 if (use_goto_tb(s, s->next_pc)) {
1285 if (is_imm && use_goto_tb(s, dest)) {
1286 /* Both exits can use goto_tb. */
1289 lab = gen_new_label();
1291 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1293 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1296 /* Branch not taken. */
1298 tcg_gen_movi_i64(psw_addr, s->next_pc);
1299 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1303 per_breaking_event(s);
1305 tcg_gen_movi_i64(psw_addr, dest);
1306 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1310 /* Fallthru can use goto_tb, but taken branch cannot. */
1311 /* Store taken branch destination before the brcond. This
1312 avoids having to allocate a new local temp to hold it.
1313 We'll overwrite this in the not taken case anyway. */
1315 tcg_gen_mov_i64(psw_addr, cdest);
1318 lab = gen_new_label();
1320 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1322 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1325 /* Branch not taken. */
1328 tcg_gen_movi_i64(psw_addr, s->next_pc);
1329 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1333 tcg_gen_movi_i64(psw_addr, dest);
1335 per_breaking_event(s);
1336 ret = EXIT_PC_UPDATED;
1339 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1340 Most commonly we're single-stepping or some other condition that
1341 disables all use of goto_tb. Just update the PC and exit. */
1343 TCGv_i64 next = tcg_const_i64(s->next_pc);
1345 cdest = tcg_const_i64(dest);
1349 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1351 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
/* 32-bit comparison: widen the setcond result so a single 64-bit
   movcond can select between taken and fallthrough addresses. */
1353 TCGv_i32 t0 = tcg_temp_new_i32();
1354 TCGv_i64 t1 = tcg_temp_new_i64();
1355 TCGv_i64 z = tcg_const_i64(0);
1356 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1357 tcg_gen_extu_i32_i64(t1, t0);
1358 tcg_temp_free_i32(t0);
1359 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1360 per_branch_cond(s, TCG_COND_NE, t1, z);
1361 tcg_temp_free_i64(t1);
1362 tcg_temp_free_i64(z);
1366 tcg_temp_free_i64(cdest);
1368 tcg_temp_free_i64(next);
1370 ret = EXIT_PC_UPDATED;
1378 /* ====================================================================== */
1379 /* The operations. These perform the bulk of the work for any insn,
1380 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE: out = |in2| via branchless movcond. */
1382 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1385 z = tcg_const_i64(0);
1386 n = tcg_temp_new_i64();
1387 tcg_gen_neg_i64(n, o->in2);
1388 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1389 tcg_temp_free_i64(n);
1390 tcg_temp_free_i64(z);
/* Float absolute value = clear the sign bit (32/64/128-bit forms). */
1394 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1396 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1400 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1402 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1406 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1408 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1409 tcg_gen_mov_i64(o->out2, o->in2);
/* Integer add. */
1413 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1415 tcg_gen_add_i64(o->out, o->in1, o->in2);
/* Add with carry: derive the carry from the previous cc state. */
1419 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1424 tcg_gen_add_i64(o->out, o->in1, o->in2);
1426 /* The carry flag is the msb of CC, therefore the branch mask that would
1427 create that comparison is 3. Feeding the generated comparison to
1428 setcond produces the carry flag that we desire. */
1429 disas_jcc(s, &cmp, 3);
1430 carry = tcg_temp_new_i64();
1432 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1434 TCGv_i32 t = tcg_temp_new_i32();
1435 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1436 tcg_gen_extu_i32_i64(carry, t);
1437 tcg_temp_free_i32(t);
1441 tcg_gen_add_i64(o->out, o->out, carry);
1442 tcg_temp_free_i64(carry);
/* BFP adds, delegated to helpers (32/64/128-bit). */
1446 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1448 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1452 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1454 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1458 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1460 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1461 return_low128(o->out2);
/* Bitwise AND. */
1465 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1467 tcg_gen_and_i64(o->out, o->in1, o->in2);
/* AND with an immediate sub-field: insn->data encodes shift (low byte)
   and field size (high bits); bits outside the field are preserved. */
1471 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1473 int shift = s->insn->data & 0xff;
1474 int size = s->insn->data >> 8;
1475 uint64_t mask = ((1ull << size) - 1) << shift;
1478 tcg_gen_shli_i64(o->in2, o->in2, shift);
1479 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1480 tcg_gen_and_i64(o->out, o->in1, o->in2);
1482 /* Produce the CC from only the bits manipulated. */
1483 tcg_gen_andi_i64(cc_dst, o->out, mask);
1484 set_cc_nz_u64(s, cc_dst);
/* BRANCH AND SAVE (register form): save link info, then branch to in2
   unless the target register was r0 (unused in2 -> fall through). */
1488 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1490 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1491 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1492 tcg_gen_mov_i64(psw_addr, o->in2);
1493 per_branch(s, false);
1494 return EXIT_PC_UPDATED;
/* BRANCH AND SAVE (pc-relative immediate form). */
1500 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1502 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1503 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* BRANCH ON CONDITION: condition derived from mask m1 and current cc. */
1506 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1508 int m1 = get_field(s->fields, m1);
1509 bool is_imm = have_field(s->fields, i2);
1510 int imm = is_imm ? get_field(s->fields, i2) : 0;
1513 disas_jcc(s, &c, m1);
1514 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT (32-bit): decrement low half of r1, branch if != 0. */
1517 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1519 int r1 = get_field(s->fields, r1);
1520 bool is_imm = have_field(s->fields, i2);
1521 int imm = is_imm ? get_field(s->fields, i2) : 0;
1525 c.cond = TCG_COND_NE;
1530 t = tcg_temp_new_i64();
1531 tcg_gen_subi_i64(t, regs[r1], 1);
1532 store_reg32_i64(r1, t);
1533 c.u.s32.a = tcg_temp_new_i32();
1534 c.u.s32.b = tcg_const_i32(0);
1535 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1536 tcg_temp_free_i64(t);
1538 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT HIGH: same, but on the high 32 bits of r1. */
1541 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1543 int r1 = get_field(s->fields, r1);
1544 int imm = get_field(s->fields, i2);
1548 c.cond = TCG_COND_NE;
1553 t = tcg_temp_new_i64();
1554 tcg_gen_shri_i64(t, regs[r1], 32);
1555 tcg_gen_subi_i64(t, t, 1);
1556 store_reg32h_i64(r1, t);
1557 c.u.s32.a = tcg_temp_new_i32();
1558 c.u.s32.b = tcg_const_i32(0);
1559 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1560 tcg_temp_free_i64(t);
1562 return help_branch(s, &c, 1, imm, o->in2);
/* BRANCH ON COUNT (64-bit): decrement r1 in place, branch if != 0. */
1565 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1567 int r1 = get_field(s->fields, r1);
1568 bool is_imm = have_field(s->fields, i2);
1569 int imm = is_imm ? get_field(s->fields, i2) : 0;
1572 c.cond = TCG_COND_NE;
1577 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1578 c.u.s64.a = regs[r1];
1579 c.u.s64.b = tcg_const_i64(0);
1581 return help_branch(s, &c, is_imm, imm, o->in2);
1584 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1586 int r1 = get_field(s->fields, r1);
1587 int r3 = get_field(s->fields, r3);
1588 bool is_imm = have_field(s->fields, i2);
1589 int imm = is_imm ? get_field(s->fields, i2) : 0;
1593 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1598 t = tcg_temp_new_i64();
1599 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1600 c.u.s32.a = tcg_temp_new_i32();
1601 c.u.s32.b = tcg_temp_new_i32();
1602 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1603 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1604 store_reg32_i64(r1, t);
1605 tcg_temp_free_i64(t);
1607 return help_branch(s, &c, is_imm, imm, o->in2);
/* 64-bit branch-on-index: r1 += r3, compare against r3|1.  When r1 is
   itself the comparand register (r1 == r3|1), its value must be copied
   out before the add clobbers it. */
1610 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1612 int r1 = get_field(s->fields, r1);
1613 int r3 = get_field(s->fields, r3);
1614 bool is_imm = have_field(s->fields, i2);
1615 int imm = is_imm ? get_field(s->fields, i2) : 0;
1618 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
/* Snapshot the comparand if the add below would overwrite it. */
1621 if (r1 == (r3 | 1)) {
1622 c.u.s64.b = load_reg(r3 | 1);
1625 c.u.s64.b = regs[r3 | 1];
1629 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1630 c.u.s64.a = regs[r1];
1633 return help_branch(s, &c, is_imm, imm, o->in2);
/* Compare-and-branch family: condition taken from ltgt_cond[m3], made
   unsigned when s->insn->data is set.  The branch target comes from the
   i4 immediate when present, else from the b4/d4 address. */
1636 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1638 int imm, m3 = get_field(s->fields, m3);
1642 c.cond = ltgt_cond[m3];
1643 if (s->insn->data) {
1644 c.cond = tcg_unsigned_cond(c.cond);
/* Operands are full 64-bit globals; no copies are made. */
1646 c.is_64 = c.g1 = c.g2 = true;
1650 is_imm = have_field(s->fields, i4);
1652 imm = get_field(s->fields, i4);
1655 o->out = get_address(s, 0, get_field(s->fields, b4),
1656 get_field(s->fields, d4));
1659 return help_branch(s, &c, is_imm, imm, o->out);
/* COMPARE (short BFP): helper compares in1/in2 and writes cc_op. */
1662 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1664 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
/* COMPARE (long BFP): helper compares in1/in2 and writes cc_op. */
1669 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1671 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
/* COMPARE (extended BFP): 128-bit operands passed as two halves each. */
1676 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1678 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
/* Convert short BFP to 32-bit int; m3 is the rounding mode field. */
1683 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1685 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1686 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1687 tcg_temp_free_i32(m3);
/* CC derived from the float source, not the integer result. */
1688 gen_set_cc_nz_f32(s, o->in2);
/* Convert long BFP to 32-bit int; m3 is the rounding mode field. */
1692 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1694 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1695 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1696 tcg_temp_free_i32(m3);
1697 gen_set_cc_nz_f64(s, o->in2);
/* Convert extended BFP (in1:in2) to 32-bit int; m3 = rounding mode. */
1701 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1703 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1704 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1705 tcg_temp_free_i32(m3);
1706 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert short BFP to 64-bit int; m3 = rounding mode. */
1710 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1712 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1713 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1714 tcg_temp_free_i32(m3);
1715 gen_set_cc_nz_f32(s, o->in2);
/* Convert long BFP to 64-bit int; m3 = rounding mode. */
1719 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1721 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1722 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1723 tcg_temp_free_i32(m3);
1724 gen_set_cc_nz_f64(s, o->in2);
/* Convert extended BFP (in1:in2) to 64-bit int; m3 = rounding mode. */
1728 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1730 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1731 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1732 tcg_temp_free_i32(m3);
1733 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert short BFP to 32-bit logical (unsigned); m3 = rounding mode. */
1737 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1739 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1740 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1741 tcg_temp_free_i32(m3);
1742 gen_set_cc_nz_f32(s, o->in2);
/* Convert long BFP to 32-bit logical (unsigned); m3 = rounding mode. */
1746 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1748 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1749 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1750 tcg_temp_free_i32(m3);
1751 gen_set_cc_nz_f64(s, o->in2);
/* Convert extended BFP to 32-bit logical; m3 = rounding mode. */
1755 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1757 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1758 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1759 tcg_temp_free_i32(m3);
1760 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert short BFP to 64-bit logical; m3 = rounding mode. */
1764 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1766 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1767 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1768 tcg_temp_free_i32(m3);
1769 gen_set_cc_nz_f32(s, o->in2);
/* Convert long BFP to 64-bit logical; m3 = rounding mode. */
1773 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1775 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1776 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1777 tcg_temp_free_i32(m3);
1778 gen_set_cc_nz_f64(s, o->in2);
/* Convert extended BFP to 64-bit logical; m3 = rounding mode. */
1782 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1784 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1785 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1786 tcg_temp_free_i32(m3);
1787 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert 64-bit int to short BFP; m3 = rounding mode.  No CC update. */
1791 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1793 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1794 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1795 tcg_temp_free_i32(m3);
/* Convert 64-bit int to long BFP; m3 = rounding mode. */
1799 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1801 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1802 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1803 tcg_temp_free_i32(m3);
/* Convert 64-bit int to extended BFP; low half returned via retxl. */
1807 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1809 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1810 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1811 tcg_temp_free_i32(m3);
1812 return_low128(o->out2);
/* Convert 64-bit logical (unsigned) to short BFP; m3 = rounding mode. */
1816 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1818 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1819 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1820 tcg_temp_free_i32(m3);
/* Convert 64-bit logical (unsigned) to long BFP; m3 = rounding mode. */
1824 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1826 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1827 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1828 tcg_temp_free_i32(m3);
/* Convert 64-bit logical to extended BFP; low half via retxl. */
1832 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1834 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1835 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1836 tcg_temp_free_i32(m3);
1837 return_low128(o->out2);
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed (len); the second-operand register pair r2/r2+1 is advanced
   by len afterwards.  Helper may fault, hence potential_page_fault. */
1841 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1843 int r2 = get_field(s->fields, r2);
1844 TCGv_i64 len = tcg_temp_new_i64();
1846 potential_page_fault(s);
1847 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1849 return_low128(o->out);
/* Advance address and decrement remaining length by the bytes done. */
1851 tcg_gen_add_i64(regs[r2], regs[r2], len);
1852 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1853 tcg_temp_free_i64(len);
/* COMPARE LOGICAL (CLC).  For operand lengths of 1/2/4/8 bytes the
   compare is inlined: both operands are loaded into cc_src/cc_dst and
   CC is computed via CC_OP_LTUGTU_64.  Other lengths go through the
   clc helper, which sets cc_op itself.
   NOTE(review): the switch-on-length and return statements are missing
   from this extract — verify control flow against the original file. */
1858 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1860 int l = get_field(s->fields, l1);
1865 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1866 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1869 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1870 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1873 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1874 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1877 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1878 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
/* Fallback: arbitrary length handled by the helper. */
1881 potential_page_fault(s);
1882 vl = tcg_const_i32(l);
1883 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1884 tcg_temp_free_i32(vl);
1888 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to the helper, which
   updates the register pairs and sets cc_op. */
1892 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1894 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1895 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1896 potential_page_fault(s);
1897 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1898 tcg_temp_free_i32(r1);
1899 tcg_temp_free_i32(r3);
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the 32-bit first operand is
   truncated from in1 and compared byte-wise per mask m3 by the helper. */
1904 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1906 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1907 TCGv_i32 t1 = tcg_temp_new_i32();
1908 tcg_gen_trunc_i64_i32(t1, o->in1);
1909 potential_page_fault(s);
1910 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1912 tcg_temp_free_i32(t1);
1913 tcg_temp_free_i32(m3);
/* COMPARE LOGICAL STRING: helper scans until the terminator in regs[0];
   updated operand addresses come back in in1 and (via retxl) in2. */
1917 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1919 potential_page_fault(s);
1920 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1922 return_low128(o->in2);
/* COPY SIGN: out = magnitude of in2 with the sign bit taken from in1. */
1926 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1928 TCGv_i64 t = tcg_temp_new_i64();
1929 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1930 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1931 tcg_gen_or_i64(o->out, o->out, t);
1932 tcg_temp_free_i64(t);
/* COMPARE AND SWAP (CS/CSG): 32- or 64-bit width selected by
   s->insn->data.  Loads memory, compares against the expected value in
   in2, conditionally stores the new value from in1, and produces the CC
   directly from the (in)equality test.
   NOTE(review): the if/else lines selecting 32- vs 64-bit load/store
   are missing from this extract. */
1936 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1938 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1939 int d2 = get_field(s->fields, d2);
1940 int b2 = get_field(s->fields, b2);
1941 int is_64 = s->insn->data;
1942 TCGv_i64 addr, mem, cc, z;
1944 /* Note that in1 = R3 (new value) and
1945 in2 = (zero-extended) R1 (expected value). */
1947 /* Load the memory into the (temporary) output. While the PoO only talks
1948 about moving the memory to R1 on inequality, if we include equality it
1949 means that R1 is equal to the memory in all conditions. */
1950 addr = get_address(s, 0, b2, d2);
1952 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1954 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1957 /* Are the memory and expected values (un)equal? Note that this setcond
1958 produces the output CC value, thus the NE sense of the test. */
1959 cc = tcg_temp_new_i64();
1960 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1962 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1963 Recall that we are allowed to unconditionally issue the store (and
1964 thus any possible write trap), so (re-)store the original contents
1965 of MEM in case of inequality. */
1966 z = tcg_const_i64(0);
1967 mem = tcg_temp_new_i64();
1968 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out)
1970 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1972 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1974 tcg_temp_free_i64(z);
1975 tcg_temp_free_i64(mem);
1976 tcg_temp_free_i64(addr);
1978 /* Store CC back to cc_op. Wait until after the store so that any
1979 exception gets the old cc_op value. */
1980 tcg_gen_trunc_i64_i32(cc_op, cc);
1981 tcg_temp_free_i64(cc);
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap built from
   two 64-bit halves.  The double-word compare is folded into arithmetic
   (xor/or) instead of two separate compares; stores are issued
   unconditionally (re-storing the old value on mismatch). */
1986 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1988 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1989 int r1 = get_field(s->fields, r1);
1990 int r3 = get_field(s->fields, r3);
1991 int d2 = get_field(s->fields, d2);
1992 int b2 = get_field(s->fields, b2);
1993 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1995 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1997 addrh = get_address(s, 0, b2, d2);
1998 addrl = get_address(s, 0, b2, d2 + 8);
1999 outh = tcg_temp_new_i64();
2000 outl = tcg_temp_new_i64();
2002 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
2003 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
2005 /* Fold the double-word compare with arithmetic. */
2006 cc = tcg_temp_new_i64();
2007 z = tcg_temp_new_i64();
2008 tcg_gen_xor_i64(cc, outh, regs[r1]);
2009 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
2010 tcg_gen_or_i64(cc, cc, z);
/* cc = (memory != R1:R1+1), which is also the final CC value. */
2011 tcg_gen_movi_i64(z, 0);
2012 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
2014 memh = tcg_temp_new_i64();
2015 meml = tcg_temp_new_i64();
2016 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
2017 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
2018 tcg_temp_free_i64(z);
2020 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
2021 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
2022 tcg_temp_free_i64(memh);
2023 tcg_temp_free_i64(meml);
2024 tcg_temp_free_i64(addrh);
2025 tcg_temp_free_i64(addrl);
2027 /* Save back state now that we've passed all exceptions. */
2028 tcg_gen_mov_i64(regs[r1], outh);
2029 tcg_gen_mov_i64(regs[r1 + 1], outl);
2030 tcg_gen_trunc_i64_i32(cc_op, cc);
2031 tcg_temp_free_i64(outh);
2032 tcg_temp_free_i64(outl);
2033 tcg_temp_free_i64(cc);
2038 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): delegated to helper. */
2039 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2041 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2042 check_privileged(s);
2043 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
2044 tcg_temp_free_i32(r1);
/* CONVERT TO DECIMAL: helper converts the 32-bit value (truncated from
   in1) to packed decimal; the 8-byte result is stored at address in2. */
2050 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2052 TCGv_i64 t1 = tcg_temp_new_i64();
2053 TCGv_i32 t2 = tcg_temp_new_i32();
2054 tcg_gen_trunc_i64_i32(t2, o->in1);
2055 gen_helper_cvd(t1, t2);
2056 tcg_temp_free_i32(t2);
2057 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2058 tcg_temp_free_i64(t1);
/* COMPARE AND TRAP: branch over the trap when the INVERTED condition
   holds (i.e. trap only when the m3 condition is met); unsigned compare
   when s->insn->data is set.
   NOTE(review): the trap-generation code after the brcond is missing
   from this extract — verify against the original file. */
2062 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2064 int m3 = get_field(s->fields, m3);
2065 TCGLabel *lab = gen_new_label();
2068 c = tcg_invert_cond(ltgt_cond[m3]);
2069 if (s->insn->data) {
2070 c = tcg_unsigned_cond(c);
2072 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2081 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): forwards r1, r3 and the i2 function code to
   the diag helper. */
2082 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2084 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2085 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2086 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2088 check_privileged(s);
2092 gen_helper_diag(cpu_env, r1, r3, func_code);
2094 tcg_temp_free_i32(func_code);
2095 tcg_temp_free_i32(r3);
2096 tcg_temp_free_i32(r1);
/* Signed 32-bit divide: helper returns quotient/remainder pair
   (remainder in out2, quotient via retxl into out). */
2101 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2103 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2104 return_low128(o->out);
/* Unsigned 32-bit divide: same result layout as op_divs32. */
2108 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2110 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2111 return_low128(o->out);
/* Signed 64-bit divide: remainder in out2, quotient via retxl. */
2115 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2117 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2118 return_low128(o->out);
/* Unsigned 128/64 divide: the 128-bit dividend is passed in as the
   out/out2 pair; remainder in out2, quotient via retxl. */
2122 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2124 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2125 return_low128(o->out);
/* DIVIDE (short BFP). */
2129 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2131 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
/* DIVIDE (long BFP). */
2135 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2137 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
/* DIVIDE (extended BFP): 128-bit operands as half pairs; low result
   half retrieved via retxl. */
2141 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2143 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2144 return_low128(o->out2);
/* EXTRACT ACCESS REGISTER: read aregs[r2] from env into the output. */
2148 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2150 int r2 = get_field(s->fields, r2);
2151 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
/* EXTRACT CACHE ATTRIBUTE: stubbed — always returns -1. */
2155 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2157 /* No cache information provided. */
2158 tcg_gen_movi_i64(o->out, -1);
/* EXTRACT FPC: load the floating-point control register from env. */
2162 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2164 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
/* EXTRACT PSW: high 32 bits of psw_mask into r1, and (when r2 is
   given) further PSW bits into r2.  Writes happen inline rather than
   via the output hook — see the PoO note below. */
2168 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2170 int r1 = get_field(s->fields, r1);
2171 int r2 = get_field(s->fields, r2);
2172 TCGv_i64 t = tcg_temp_new_i64();
2174 /* Note the "subsequently" in the PoO, which implies a defined result
2175 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2176 tcg_gen_shri_i64(t, psw_mask, 32);
2177 store_reg32_i64(r1, t);
2179 store_reg32_i64(r2, psw_mask);
2182 tcg_temp_free_i64(t);
/* EXECUTE: run the target instruction via the ex helper with the
   successor PC.  NOTE(review): the declaration of `tmp` and some
   surrounding lines are missing from this extract. */
2186 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2188 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2189 tb->flags, (ab)use the tb->cs_base field as the address of
2190 the template in memory, and grab 8 bits of tb->flags/cflags for
2191 the contents of the register. We would then recognize all this
2192 in gen_intermediate_code_internal, generating code for exactly
2193 one instruction. This new TB then gets executed normally.
2195 On the other hand, this seems to be mostly used for modifying
2196 MVC inside of memcpy, which needs a helper call anyway. So
2197 perhaps this doesn't bear thinking about any further. */
2204 tmp = tcg_const_i64(s->next_pc);
2205 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2206 tcg_temp_free_i64(tmp);
/* LOAD FP INTEGER (short BFP): round to integral; m3 = rounding mode. */
2211 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2213 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2214 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2215 tcg_temp_free_i32(m3);
/* LOAD FP INTEGER (long BFP): round to integral; m3 = rounding mode. */
2219 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2221 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2222 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2223 tcg_temp_free_i32(m3);
/* LOAD FP INTEGER (extended BFP): low half retrieved via retxl. */
2227 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2229 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2230 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2231 return_low128(o->out2);
2232 tcg_temp_free_i32(m3);
/* FIND LEFTMOST ONE: R1 = count of leading zeros (64 when input is 0),
   R1+1 = input with the found bit cleared. */
2236 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2238 /* We'll use the original input for cc computation, since we get to
2239 compare that against 0, which ought to be better than comparing
2240 the real output against 64. It also lets cc_dst be a convenient
2241 temporary during our computation. */
2242 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2244 /* R1 = IN ? CLZ(IN) : 64. */
2245 gen_helper_clz(o->out, o->in2);
2247 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2248 value by 64, which is undefined. But since the shift is 64 iff the
2249 input is zero, we still get the correct result after and'ing. */
2250 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2251 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2252 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
/* INSERT CHARACTERS UNDER MASK: contiguous masks (0xf/0x3/0x1 style)
   become a single sized load + deposit; other masks fall back to a
   byte-by-byte load/insert loop.  CC computed via CC_OP_ICM from the
   mask of inserted bits and the result.
   NOTE(review): the switch on m3, loop control, and several declaration
   lines (ccm, etc.) are missing from this extract — verify control flow
   against the original file. */
2256 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2258 int m3 = get_field(s->fields, m3);
2259 int pos, len, base = s->insn->data;
2260 TCGv_i64 tmp = tcg_temp_new_i64();
2265 /* Effectively a 32-bit load. */
2266 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2273 /* Effectively a 16-bit load. */
2274 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2282 /* Effectively an 8-bit load. */
2283 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
/* Single deposit at the bit position implied by the mask. */
2288 pos = base + ctz32(m3) * 8;
2289 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2290 ccm = ((1ull << len) - 1) << pos;
2294 /* This is going to be a sequence of loads and inserts. */
2295 pos = base + 32 - 8;
2299 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2300 tcg_gen_addi_i64(o->in2, o->in2, 1);
2301 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2304 m3 = (m3 << 1) & 0xf;
2310 tcg_gen_movi_i64(tmp, ccm);
2311 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2312 tcg_temp_free_i64(tmp);
/* Insert immediate: deposit in2 into in1 at the bit position/size
   encoded in s->insn->data (low byte = shift, high bits = size). */
2316 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2318 int shift = s->insn->data & 0xff;
2319 int size = s->insn->data >> 8;
2320 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
/* INSERT PROGRAM MASK: build bits 24-31 of the output from the PSW
   program mask (psw_mask bits) and the condition code.
   NOTE(review): the declaration of t1 and the gen_op_calc_cc call are
   missing from this extract. */
2324 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
/* Clear the destination byte first. */
2329 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
/* Program mask: psw_mask bits extracted via shift left 20, right 36. */
2331 t1 = tcg_temp_new_i64();
2332 tcg_gen_shli_i64(t1, psw_mask, 20);
2333 tcg_gen_shri_i64(t1, t1, 36);
2334 tcg_gen_or_i64(o->out, o->out, t1);
/* Condition code goes into bits 28-29 (cc << 28). */
2336 tcg_gen_extu_i32_i64(t1, cc_op);
2337 tcg_gen_shli_i64(t1, t1, 28);
2338 tcg_gen_or_i64(o->out, o->out, t1);
2339 tcg_temp_free_i64(t1);
2343 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): delegated to helper. */
2344 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2346 check_privileged(s);
2347 gen_helper_ipte(cpu_env, o->in1, o->in2);
/* INSERT STORAGE KEY EXTENDED (privileged): delegated to helper. */
2351 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2353 check_privileged(s);
2354 gen_helper_iske(o->out, cpu_env, o->in2);
/* LOAD LENGTHENED short -> long BFP. */
2359 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2361 gen_helper_ldeb(o->out, cpu_env, o->in2);
/* LOAD ROUNDED long -> short BFP. */
2365 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2367 gen_helper_ledb(o->out, cpu_env, o->in2);
/* LOAD ROUNDED extended -> long BFP. */
2371 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2373 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
/* LOAD ROUNDED extended -> short BFP. */
2377 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2379 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
/* LOAD LENGTHENED long -> extended BFP; low half via retxl. */
2383 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2385 gen_helper_lxdb(o->out, cpu_env, o->in2);
2386 return_low128(o->out2);
/* LOAD LENGTHENED short -> extended BFP; low half via retxl. */
2390 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2392 gen_helper_lxeb(o->out, cpu_env, o->in2);
2393 return_low128(o->out2);
/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits. */
2397 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2399 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
/* Sign-extending 8-bit memory load. */
2403 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2405 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
/* Zero-extending 8-bit memory load. */
2409 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2411 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
/* Sign-extending 16-bit memory load. */
2415 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2417 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
/* Zero-extending 16-bit memory load. */
2421 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2423 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
/* Sign-extending 32-bit memory load. */
2427 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2429 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
/* Zero-extending 32-bit memory load. */
2433 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2435 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
/* 64-bit memory load. */
2439 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2441 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
/* LOAD AND TRAP (32-bit): store the value, then trap if it is zero.
   NOTE(review): the trap generation after the brcond and the label
   emission are missing from this extract. */
2445 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2447 TCGLabel *lab = gen_new_label();
2448 store_reg32_i64(get_field(s->fields, r1), o->in2);
2449 /* The value is stored even in case of trap. */
2450 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
/* LOAD AND TRAP (64-bit load): trap when the loaded value is zero.
   NOTE(review): trap generation / label emission missing from extract. */
2456 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2458 TCGLabel *lab = gen_new_label();
2459 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2460 /* The value is stored even in case of trap. */
2461 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
/* LOAD HIGH AND TRAP: store into the high half of r1, trap on zero.
   NOTE(review): trap generation / label emission missing from extract. */
2467 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2469 TCGLabel *lab = gen_new_label();
2470 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2471 /* The value is stored even in case of trap. */
2472 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
/* LOAD LOGICAL AND TRAP (32 -> 64 zero-extend): trap on zero.
   NOTE(review): trap generation / label emission missing from extract. */
2478 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2480 TCGLabel *lab = gen_new_label();
2481 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2482 /* The value is stored even in case of trap. */
2483 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, trap on zero.
   NOTE(review): trap generation / label emission missing from extract. */
2489 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2491 TCGLabel *lab = gen_new_label();
2492 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2493 /* The value is stored even in case of trap. */
2494 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
/* LOAD ON CONDITION: evaluate the m3 condition via disas_jcc and pick
   in2 or in1 with a movcond.  The 64-bit comparison path uses the
   condition operands directly; the 32-bit path first materializes the
   condition as a 0/1 value, then selects on != 0.
   NOTE(review): the if/else around the two paths and the free_compare
   call are missing from this extract. */
2500 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2504 disas_jcc(s, &c, get_field(s->fields, m3));
/* 64-bit condition: select directly on the comparison operands. */
2507 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
/* 32-bit condition: compute a boolean, widen, select on != 0. */
2511 TCGv_i32 t32 = tcg_temp_new_i32();
2514 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2517 t = tcg_temp_new_i64();
2518 tcg_gen_extu_i32_i64(t, t32);
2519 tcg_temp_free_i32(t32);
2521 z = tcg_const_i64(0);
2522 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2523 tcg_temp_free_i64(t);
2524 tcg_temp_free_i64(z);
2530 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): registers r1..r3 loaded from the
   address in in2 by the helper. */
2531 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2533 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2534 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2535 check_privileged(s);
2536 potential_page_fault(s);
2537 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2538 tcg_temp_free_i32(r1);
2539 tcg_temp_free_i32(r3);
/* LOAD CONTROL (64-bit, privileged): same shape as op_lctl. */
2543 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2545 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2546 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2547 check_privileged(s);
2548 potential_page_fault(s);
2549 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2550 tcg_temp_free_i32(r1);
2551 tcg_temp_free_i32(r3);
/* LOAD REAL ADDRESS (privileged): translation done by the helper. */
2554 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2556 check_privileged(s);
2557 potential_page_fault(s);
2558 gen_helper_lra(o->out, cpu_env, o->in2);
/* LOAD PSW (privileged): read the 8-byte short-format PSW from memory,
   widen the 32-bit mask to 64 bits, and install it.  Ends the TB since
   control flow is redirected (EXIT_NORETURN).
   NOTE(review): the declarations of t1/t2 are missing from this
   extract. */
2563 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2567 check_privileged(s);
2568 per_breaking_event(s);
2570 t1 = tcg_temp_new_i64();
2571 t2 = tcg_temp_new_i64();
2572 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2573 tcg_gen_addi_i64(o->in2, o->in2, 4);
2574 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2575 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2576 tcg_gen_shli_i64(t1, t1, 32);
2577 gen_helper_load_psw(cpu_env, t1, t2);
2578 tcg_temp_free_i64(t1);
2579 tcg_temp_free_i64(t2);
2580 return EXIT_NORETURN;
/* LOAD PSW EXTENDED (privileged): read the 16-byte PSW (mask + addr)
   and install it; ends the TB (EXIT_NORETURN).
   NOTE(review): the declarations of t1/t2 are missing from this
   extract. */
2583 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2587 check_privileged(s);
2588 per_breaking_event(s);
2590 t1 = tcg_temp_new_i64();
2591 t2 = tcg_temp_new_i64();
2592 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2593 tcg_gen_addi_i64(o->in2, o->in2, 8);
2594 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2595 gen_helper_load_psw(cpu_env, t1, t2);
2596 tcg_temp_free_i64(t1);
2597 tcg_temp_free_i64(t2);
2598 return EXIT_NORETURN;
/* LOAD ACCESS MULTIPLE: access registers r1..r3 loaded by the helper. */
2602 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2604 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2605 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2606 potential_page_fault(s);
2607 gen_helper_lam(cpu_env, r1, o->in2, r3);
2608 tcg_temp_free_i32(r1);
2609 tcg_temp_free_i32(r3);
/* LOAD MULTIPLE (32-bit): special-cases one register; otherwise loads
   first and last registers first so any page fault fires before any
   register is modified, then fills in the middle registers, which can
   no longer fault.
   NOTE(review): early returns, loop control (register stepping with
   r1 = (r1+1)&15), and temp frees are missing from this extract —
   verify against the original file. */
2613 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2615 int r1 = get_field(s->fields, r1);
2616 int r3 = get_field(s->fields, r3);
2619 /* Only one register to read. */
2620 t1 = tcg_temp_new_i64();
2621 if (unlikely(r1 == r3)) {
2622 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2623 store_reg32_i64(r1, t1);
2628 /* First load the values of the first and last registers to trigger
2629 possible page faults. */
2630 t2 = tcg_temp_new_i64();
2631 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2632 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2633 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2634 store_reg32_i64(r1, t1);
2635 store_reg32_i64(r3, t2);
2637 /* Only two registers to read. */
2638 if (((r1 + 1) & 15) == r3) {
2644 /* Then load the remaining registers. Page fault can't occur. */
2646 tcg_gen_movi_i64(t2, 4);
2649 tcg_gen_add_i64(o->in2, o->in2, t2);
2650 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2651 store_reg32_i64(r1, t1);
/* LOAD MULTIPLE HIGH: same structure as op_lm32 but writes the high
   halves of the registers (store_reg32h_i64).
   NOTE(review): early returns, loop control and temp frees are missing
   from this extract — verify against the original file. */
2659 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2661 int r1 = get_field(s->fields, r1);
2662 int r3 = get_field(s->fields, r3);
2665 /* Only one register to read. */
2666 t1 = tcg_temp_new_i64();
2667 if (unlikely(r1 == r3)) {
2668 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2669 store_reg32h_i64(r1, t1);
2674 /* First load the values of the first and last registers to trigger
2675 possible page faults. */
2676 t2 = tcg_temp_new_i64();
2677 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2678 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2679 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2680 store_reg32h_i64(r1, t1);
2681 store_reg32h_i64(r3, t2);
2683 /* Only two registers to read. */
2684 if (((r1 + 1) & 15) == r3) {
2690 /* Then load the remaining registers. Page fault can't occur. */
2692 tcg_gen_movi_i64(t2, 4);
2695 tcg_gen_add_i64(o->in2, o->in2, t2);
2696 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2697 store_reg32h_i64(r1, t1);
/* LOAD MULTIPLE (64-bit, LMG): one-register fast path; otherwise load
   first and last registers first (fault-before-modify), deferring the
   write of r1 until after the last load, then fill in the rest.
   NOTE(review): early returns, loop control and temp frees are missing
   from this extract — verify against the original file. */
2705 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2707 int r1 = get_field(s->fields, r1);
2708 int r3 = get_field(s->fields, r3);
2711 /* Only one register to read. */
2712 if (unlikely(r1 == r3)) {
2713 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2717 /* First load the values of the first and last registers to trigger
2718 possible page faults. */
2719 t1 = tcg_temp_new_i64();
2720 t2 = tcg_temp_new_i64();
2721 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2722 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2723 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2724 tcg_gen_mov_i64(regs[r1], t1);
2727 /* Only two registers to read. */
2728 if (((r1 + 1) & 15) == r3) {
2733 /* Then load the remaining registers. Page fault can't occur. */
2735 tcg_gen_movi_i64(t1, 8);
2738 tcg_gen_add_i64(o->in2, o->in2, t1);
2739 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2746 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit, privileged): helper does the load. */
2747 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2749 check_privileged(s);
2750 potential_page_fault(s);
2751 gen_helper_lura(o->out, cpu_env, o->in2);
/* LOAD USING REAL ADDRESS (64-bit, privileged): helper does the load. */
2755 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2757 check_privileged(s);
2758 potential_page_fault(s);
2759 gen_helper_lurag(o->out, cpu_env, o->in2);
/* Generic move: hand in2 over to the output, inheriting its global
   flag and marking in2 unused so it is not freed twice.
   NOTE(review): the `o->out = o->in2;` assignment line appears to be
   missing from this extract — verify against the original file. */
2764 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2767 o->g_out = o->g_in2;
2768 TCGV_UNUSED_I64(o->in2);
/* MVCOS-style move with access-register update: moves in2 to the
   output (like op_mov2) and additionally writes access register 1
   according to the current address-space control (ASC) mode.
   NOTE(review): the `o->out = o->in2;` assignment and the break
   statements of the switch are missing from this extract. */
2773 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2775 int b2 = get_field(s->fields, b2);
2776 TCGv ar1 = tcg_temp_new_i64();
2779 o->g_out = o->g_in2;
2780 TCGV_UNUSED_I64(o->in2);
2783 switch (s->tb->flags & FLAG_MASK_ASC) {
2784 case PSW_ASC_PRIMARY >> 32:
2785 tcg_gen_movi_i64(ar1, 0);
2787 case PSW_ASC_ACCREG >> 32:
2788 tcg_gen_movi_i64(ar1, 1);
2790 case PSW_ASC_SECONDARY >> 32:
/* With a base register, AR1 is copied from aregs[b2]; else 0. */
2792 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2794 tcg_gen_movi_i64(ar1, 0);
2797 case PSW_ASC_HOME >> 32:
2798 tcg_gen_movi_i64(ar1, 2);
2802 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2803 tcg_temp_free_i64(ar1);
/* Move a 128-bit pair: hand in1/in2 over to out/out2, transferring the
   global flags and marking the inputs unused.
   NOTE(review): the `o->out = o->in1; o->out2 = o->in2;` assignment
   lines appear to be missing from this extract. */
2808 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2812 o->g_out = o->g_in1;
2813 o->g_out2 = o->g_in2;
2814 TCGV_UNUSED_I64(o->in1);
2815 TCGV_UNUSED_I64(o->in2);
2816 o->g_in1 = o->g_in2 = false;
/* MOVE (MVC): length-l1 byte copy done by the helper. */
2820 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2822 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2823 potential_page_fault(s);
2824 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2825 tcg_temp_free_i32(l);
/* MOVE LONG (MVCL): register pairs r1/r2 handled by the helper, which
   also sets cc_op. */
2829 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2831 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2832 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2833 potential_page_fault(s);
2834 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2835 tcg_temp_free_i32(r1);
2836 tcg_temp_free_i32(r2);
/* MOVE LONG EXTENDED (MVCLE): delegated to helper; sets cc_op. */
2841 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2843 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2844 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2845 potential_page_fault(s);
2846 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2847 tcg_temp_free_i32(r1);
2848 tcg_temp_free_i32(r3);
2853 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (privileged): length register is the l1 field. */
2854 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2856 int r1 = get_field(s->fields, l1);
2857 check_privileged(s);
2858 potential_page_fault(s);
2859 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MOVE TO SECONDARY (privileged): mirror of op_mvcp. */
2864 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2866 int r1 = get_field(s->fields, l1);
2867 check_privileged(s);
2868 potential_page_fault(s);
2869 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MOVE PAGE: page copy delegated to the helper; regs[0] carries the
   instruction's control flags. */
2875 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2877 potential_page_fault(s);
2878 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
/* MOVE STRING: helper copies up to the terminator in regs[0]; updated
   addresses come back in in1 and (via retxl) in2. */
2883 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2885 potential_page_fault(s);
2886 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2888 return_low128(o->in2);
/* 64x64 -> 64-bit multiply. */
2892 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2894 tcg_gen_mul_i64(o->out, o->in1, o->in2);
/* 64x64 -> 128-bit unsigned multiply (high in out, low in out2). */
2898 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2900 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
/* MULTIPLY (short BFP). */
2904 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2906 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
/* MULTIPLY short BFP operands producing a long BFP result. */
2910 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2912 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
/* MULTIPLY (long BFP). */
2916 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2918 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
/* MULTIPLY (extended BFP): 128-bit halves; low half via retxl. */
2922 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2924 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2925 return_low128(o->out2);
/* MULTIPLY long BFP operands producing an extended BFP result. */
2929 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2931 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2932 return_low128(o->out2);
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f(r3). */
2936 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2938 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2939 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2940 tcg_temp_free_i64(r3);
/* MULTIPLY AND ADD (long BFP): addend read from fregs[r3] directly. */
2944 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2946 int r3 = get_field(s->fields, r3);
2947 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* MULTIPLY AND SUBTRACT (short BFP). */
2951 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2953 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2954 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2955 tcg_temp_free_i64(r3);
/* MULTIPLY AND SUBTRACT (long BFP). */
2959 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2961 int r3 = get_field(s->fields, r3);
2962 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* LOAD NEGATIVE (integer): out = -(|in2|), via movcond on sign. */
2966 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2969 z = tcg_const_i64(0);
2970 n = tcg_temp_new_i64();
2971 tcg_gen_neg_i64(n, o->in2);
/* Pick the negated value when in2 >= 0, else keep in2 (already <= 0). */
2972 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2973 tcg_temp_free_i64(n);
2974 tcg_temp_free_i64(z);
/* LOAD NEGATIVE (short BFP): force the sign bit (bit 31) on. */
2978 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2980 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2984 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2986 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2990 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2992 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2993 tcg_gen_mov_i64(o->out2, o->in2);
2997 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2999 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3000 potential_page_fault(s);
3001 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3002 tcg_temp_free_i32(l);
3007 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3009 tcg_gen_neg_i64(o->out, o->in2);
3013 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3015 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3019 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3021 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3025 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3027 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3028 tcg_gen_mov_i64(o->out2, o->in2);
3032 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3034 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3035 potential_page_fault(s);
3036 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3037 tcg_temp_free_i32(l);
3042 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3044 tcg_gen_or_i64(o->out, o->in1, o->in2);
3048 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3050 int shift = s->insn->data & 0xff;
3051 int size = s->insn->data >> 8;
3052 uint64_t mask = ((1ull << size) - 1) << shift;
3055 tcg_gen_shli_i64(o->in2, o->in2, shift);
3056 tcg_gen_or_i64(o->out, o->in1, o->in2);
3058 /* Produce the CC from only the bits manipulated. */
3059 tcg_gen_andi_i64(cc_dst, o->out, mask);
3060 set_cc_nz_u64(s, cc_dst);
3064 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3066 gen_helper_popcnt(o->out, o->in2);
3070 #ifndef CONFIG_USER_ONLY
3071 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3073 check_privileged(s);
3074 gen_helper_ptlb(cpu_env);
3079 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3081 int i3 = get_field(s->fields, i3);
3082 int i4 = get_field(s->fields, i4);
3083 int i5 = get_field(s->fields, i5);
3084 int do_zero = i4 & 0x80;
3085 uint64_t mask, imask, pmask;
3088 /* Adjust the arguments for the specific insn. */
3089 switch (s->fields->op2) {
3090 case 0x55: /* risbg */
3095 case 0x5d: /* risbhg */
3098 pmask = 0xffffffff00000000ull;
3100 case 0x51: /* risblg */
3103 pmask = 0x00000000ffffffffull;
3109 /* MASK is the set of bits to be inserted from R2.
3110 Take care for I3/I4 wraparound. */
3113 mask ^= pmask >> i4 >> 1;
3115 mask |= ~(pmask >> i4 >> 1);
3119 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3120 insns, we need to keep the other half of the register. */
3121 imask = ~mask | ~pmask;
3123 if (s->fields->op2 == 0x55) {
3130 /* In some cases we can implement this with deposit, which can be more
3131 efficient on some hosts. */
3132 if (~mask == imask && i3 <= i4) {
3133 if (s->fields->op2 == 0x5d) {
3136 /* Note that we rotate the bits to be inserted to the lsb, not to
3137 the position as described in the PoO. */
3140 rot = (i5 - pos) & 63;
3146 /* Rotate the input as necessary. */
3147 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3149 /* Insert the selected bits into the output. */
3151 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3152 } else if (imask == 0) {
3153 tcg_gen_andi_i64(o->out, o->in2, mask);
3155 tcg_gen_andi_i64(o->in2, o->in2, mask);
3156 tcg_gen_andi_i64(o->out, o->out, imask);
3157 tcg_gen_or_i64(o->out, o->out, o->in2);
3162 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3164 int i3 = get_field(s->fields, i3);
3165 int i4 = get_field(s->fields, i4);
3166 int i5 = get_field(s->fields, i5);
3169 /* If this is a test-only form, arrange to discard the result. */
3171 o->out = tcg_temp_new_i64();
3179 /* MASK is the set of bits to be operated on from R2.
3180 Take care for I3/I4 wraparound. */
3183 mask ^= ~0ull >> i4 >> 1;
3185 mask |= ~(~0ull >> i4 >> 1);
3188 /* Rotate the input as necessary. */
3189 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3192 switch (s->fields->op2) {
3193 case 0x55: /* AND */
3194 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3195 tcg_gen_and_i64(o->out, o->out, o->in2);
3198 tcg_gen_andi_i64(o->in2, o->in2, mask);
3199 tcg_gen_or_i64(o->out, o->out, o->in2);
3201 case 0x57: /* XOR */
3202 tcg_gen_andi_i64(o->in2, o->in2, mask);
3203 tcg_gen_xor_i64(o->out, o->out, o->in2);
3210 tcg_gen_andi_i64(cc_dst, o->out, mask);
3211 set_cc_nz_u64(s, cc_dst);
3215 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3217 tcg_gen_bswap16_i64(o->out, o->in2);
3221 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3223 tcg_gen_bswap32_i64(o->out, o->in2);
3227 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3229 tcg_gen_bswap64_i64(o->out, o->in2);
3233 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3235 TCGv_i32 t1 = tcg_temp_new_i32();
3236 TCGv_i32 t2 = tcg_temp_new_i32();
3237 TCGv_i32 to = tcg_temp_new_i32();
3238 tcg_gen_trunc_i64_i32(t1, o->in1);
3239 tcg_gen_trunc_i64_i32(t2, o->in2);
3240 tcg_gen_rotl_i32(to, t1, t2);
3241 tcg_gen_extu_i32_i64(o->out, to);
3242 tcg_temp_free_i32(t1);
3243 tcg_temp_free_i32(t2);
3244 tcg_temp_free_i32(to);
3248 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3250 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3254 #ifndef CONFIG_USER_ONLY
3255 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3257 check_privileged(s);
3258 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3263 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3265 check_privileged(s);
3266 gen_helper_sacf(cpu_env, o->in2);
3267 /* Addressing mode has changed, so end the block. */
3268 return EXIT_PC_STALE;
3272 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3274 int sam = s->insn->data;
3290 /* Bizarre but true, we check the address of the current insn for the
3291 specification exception, not the next to be executed. Thus the PoO
3292 documents that Bad Things Happen two bytes before the end. */
3293 if (s->pc & ~mask) {
3294 gen_program_exception(s, PGM_SPECIFICATION);
3295 return EXIT_NORETURN;
3299 tsam = tcg_const_i64(sam);
3300 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3301 tcg_temp_free_i64(tsam);
3303 /* Always exit the TB, since we (may have) changed execution mode. */
3304 return EXIT_PC_STALE;
3307 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3309 int r1 = get_field(s->fields, r1);
3310 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3314 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3316 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3320 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3322 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3326 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3328 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3329 return_low128(o->out2);
3333 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3335 gen_helper_sqeb(o->out, cpu_env, o->in2);
3339 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3341 gen_helper_sqdb(o->out, cpu_env, o->in2);
3345 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3347 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3348 return_low128(o->out2);
3352 #ifndef CONFIG_USER_ONLY
3353 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3355 check_privileged(s);
3356 potential_page_fault(s);
3357 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3362 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3364 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3365 check_privileged(s);
3366 potential_page_fault(s);
3367 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3368 tcg_temp_free_i32(r1);
3373 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3380 disas_jcc(s, &c, get_field(s->fields, m3));
3382 /* We want to store when the condition is fulfilled, so branch
3383 out when it's not */
3384 c.cond = tcg_invert_cond(c.cond);
3386 lab = gen_new_label();
3388 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3390 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3394 r1 = get_field(s->fields, r1);
3395 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3396 if (s->insn->data) {
3397 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3399 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3401 tcg_temp_free_i64(a);
3407 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3409 uint64_t sign = 1ull << s->insn->data;
3410 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3411 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3412 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3413 /* The arithmetic left shift is curious in that it does not affect
3414 the sign bit. Copy that over from the source unchanged. */
3415 tcg_gen_andi_i64(o->out, o->out, ~sign);
3416 tcg_gen_andi_i64(o->in1, o->in1, sign);
3417 tcg_gen_or_i64(o->out, o->out, o->in1);
3421 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3423 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3427 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3429 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3433 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3435 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3439 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3441 gen_helper_sfpc(cpu_env, o->in2);
3445 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3447 gen_helper_sfas(cpu_env, o->in2);
3451 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3453 int b2 = get_field(s->fields, b2);
3454 int d2 = get_field(s->fields, d2);
3455 TCGv_i64 t1 = tcg_temp_new_i64();
3456 TCGv_i64 t2 = tcg_temp_new_i64();
3459 switch (s->fields->op2) {
3460 case 0x99: /* SRNM */
3463 case 0xb8: /* SRNMB */
3466 case 0xb9: /* SRNMT */
3472 mask = (1 << len) - 1;
3474 /* Insert the value into the appropriate field of the FPC. */
3476 tcg_gen_movi_i64(t1, d2 & mask);
3478 tcg_gen_addi_i64(t1, regs[b2], d2);
3479 tcg_gen_andi_i64(t1, t1, mask);
3481 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3482 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3483 tcg_temp_free_i64(t1);
3485 /* Then install the new FPC to set the rounding mode in fpu_status. */
3486 gen_helper_sfpc(cpu_env, t2);
3487 tcg_temp_free_i64(t2);
3491 #ifndef CONFIG_USER_ONLY
3492 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3494 check_privileged(s);
3495 tcg_gen_shri_i64(o->in2, o->in2, 4);
3496 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3500 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3502 check_privileged(s);
3503 gen_helper_sske(cpu_env, o->in1, o->in2);
3507 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3509 check_privileged(s);
3510 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3514 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3516 check_privileged(s);
3517 /* ??? Surely cpu address != cpu number. In any case the previous
3518 version of this stored more than the required half-word, so it
3519 is unlikely this has ever been tested. */
3520 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3524 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3526 gen_helper_stck(o->out, cpu_env);
3527 /* ??? We don't implement clock states. */
3528 gen_op_movi_cc(s, 0);
3532 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3534 TCGv_i64 c1 = tcg_temp_new_i64();
3535 TCGv_i64 c2 = tcg_temp_new_i64();
3536 gen_helper_stck(c1, cpu_env);
3537 /* Shift the 64-bit value into its place as a zero-extended
3538 104-bit value. Note that "bit positions 64-103 are always
3539 non-zero so that they compare differently to STCK"; we set
3540 the least significant bit to 1. */
3541 tcg_gen_shli_i64(c2, c1, 56);
3542 tcg_gen_shri_i64(c1, c1, 8);
3543 tcg_gen_ori_i64(c2, c2, 0x10000);
3544 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3545 tcg_gen_addi_i64(o->in2, o->in2, 8);
3546 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3547 tcg_temp_free_i64(c1);
3548 tcg_temp_free_i64(c2);
3549 /* ??? We don't implement clock states. */
3550 gen_op_movi_cc(s, 0);
3554 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3556 check_privileged(s);
3557 gen_helper_sckc(cpu_env, o->in2);
3561 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3563 check_privileged(s);
3564 gen_helper_stckc(o->out, cpu_env);
3568 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3570 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3571 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3572 check_privileged(s);
3573 potential_page_fault(s);
3574 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3575 tcg_temp_free_i32(r1);
3576 tcg_temp_free_i32(r3);
3580 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3582 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3583 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3584 check_privileged(s);
3585 potential_page_fault(s);
3586 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3587 tcg_temp_free_i32(r1);
3588 tcg_temp_free_i32(r3);
3592 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3594 TCGv_i64 t1 = tcg_temp_new_i64();
3596 check_privileged(s);
3597 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3598 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3599 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3600 tcg_temp_free_i64(t1);
3605 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3607 check_privileged(s);
3608 gen_helper_spt(cpu_env, o->in2);
3612 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3615 /* We really ought to have more complete indication of facilities
3616 that we implement. Address this when STFLE is implemented. */
3617 check_privileged(s);
3618 f = tcg_const_i64(0xc0000000);
3619 a = tcg_const_i64(200);
3620 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3621 tcg_temp_free_i64(f);
3622 tcg_temp_free_i64(a);
3626 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3628 check_privileged(s);
3629 gen_helper_stpt(o->out, cpu_env);
3633 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3635 check_privileged(s);
3636 potential_page_fault(s);
3637 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3642 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3644 check_privileged(s);
3645 gen_helper_spx(cpu_env, o->in2);
3649 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3651 check_privileged(s);
3652 potential_page_fault(s);
3653 gen_helper_xsch(cpu_env, regs[1]);
3658 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3660 check_privileged(s);
3661 potential_page_fault(s);
3662 gen_helper_csch(cpu_env, regs[1]);
3667 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3669 check_privileged(s);
3670 potential_page_fault(s);
3671 gen_helper_hsch(cpu_env, regs[1]);
3676 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3678 check_privileged(s);
3679 potential_page_fault(s);
3680 gen_helper_msch(cpu_env, regs[1], o->in2);
3685 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3687 check_privileged(s);
3688 potential_page_fault(s);
3689 gen_helper_rchp(cpu_env, regs[1]);
3694 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3696 check_privileged(s);
3697 potential_page_fault(s);
3698 gen_helper_rsch(cpu_env, regs[1]);
3703 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3705 check_privileged(s);
3706 potential_page_fault(s);
3707 gen_helper_ssch(cpu_env, regs[1], o->in2);
3712 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3714 check_privileged(s);
3715 potential_page_fault(s);
3716 gen_helper_stsch(cpu_env, regs[1], o->in2);
3721 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3723 check_privileged(s);
3724 potential_page_fault(s);
3725 gen_helper_tsch(cpu_env, regs[1], o->in2);
3730 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3732 check_privileged(s);
3733 potential_page_fault(s);
3734 gen_helper_chsc(cpu_env, o->in2);
3739 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3741 check_privileged(s);
3742 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3743 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3747 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3749 uint64_t i2 = get_field(s->fields, i2);
3752 check_privileged(s);
3754 /* It is important to do what the instruction name says: STORE THEN.
3755 If we let the output hook perform the store then if we fault and
3756 restart, we'll have the wrong SYSTEM MASK in place. */
3757 t = tcg_temp_new_i64();
3758 tcg_gen_shri_i64(t, psw_mask, 56);
3759 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3760 tcg_temp_free_i64(t);
3762 if (s->fields->op == 0xac) {
3763 tcg_gen_andi_i64(psw_mask, psw_mask,
3764 (i2 << 56) | 0x00ffffffffffffffull);
3766 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3771 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3773 check_privileged(s);
3774 potential_page_fault(s);
3775 gen_helper_stura(cpu_env, o->in2, o->in1);
3779 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3781 check_privileged(s);
3782 potential_page_fault(s);
3783 gen_helper_sturg(cpu_env, o->in2, o->in1);
3788 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3790 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3794 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3796 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3800 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3802 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3806 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3808 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3812 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3814 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3815 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3816 potential_page_fault(s);
3817 gen_helper_stam(cpu_env, r1, o->in2, r3);
3818 tcg_temp_free_i32(r1);
3819 tcg_temp_free_i32(r3);
3823 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3825 int m3 = get_field(s->fields, m3);
3826 int pos, base = s->insn->data;
3827 TCGv_i64 tmp = tcg_temp_new_i64();
3829 pos = base + ctz32(m3) * 8;
3832 /* Effectively a 32-bit store. */
3833 tcg_gen_shri_i64(tmp, o->in1, pos);
3834 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3840 /* Effectively a 16-bit store. */
3841 tcg_gen_shri_i64(tmp, o->in1, pos);
3842 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3849 /* Effectively an 8-bit store. */
3850 tcg_gen_shri_i64(tmp, o->in1, pos);
3851 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3855 /* This is going to be a sequence of shifts and stores. */
3856 pos = base + 32 - 8;
3859 tcg_gen_shri_i64(tmp, o->in1, pos);
3860 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3861 tcg_gen_addi_i64(o->in2, o->in2, 1);
3863 m3 = (m3 << 1) & 0xf;
3868 tcg_temp_free_i64(tmp);
3872 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3874 int r1 = get_field(s->fields, r1);
3875 int r3 = get_field(s->fields, r3);
3876 int size = s->insn->data;
3877 TCGv_i64 tsize = tcg_const_i64(size);
3881 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3883 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3888 tcg_gen_add_i64(o->in2, o->in2, tsize);
3892 tcg_temp_free_i64(tsize);
3896 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3898 int r1 = get_field(s->fields, r1);
3899 int r3 = get_field(s->fields, r3);
3900 TCGv_i64 t = tcg_temp_new_i64();
3901 TCGv_i64 t4 = tcg_const_i64(4);
3902 TCGv_i64 t32 = tcg_const_i64(32);
3905 tcg_gen_shl_i64(t, regs[r1], t32);
3906 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3910 tcg_gen_add_i64(o->in2, o->in2, t4);
3914 tcg_temp_free_i64(t);
3915 tcg_temp_free_i64(t4);
3916 tcg_temp_free_i64(t32);
3920 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3922 potential_page_fault(s);
3923 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3925 return_low128(o->in2);
3929 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3931 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3935 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3940 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3942 /* The !borrow flag is the msb of CC. Since we want the inverse of
3943 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3944 disas_jcc(s, &cmp, 8 | 4);
3945 borrow = tcg_temp_new_i64();
3947 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3949 TCGv_i32 t = tcg_temp_new_i32();
3950 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3951 tcg_gen_extu_i32_i64(borrow, t);
3952 tcg_temp_free_i32(t);
3956 tcg_gen_sub_i64(o->out, o->out, borrow);
3957 tcg_temp_free_i64(borrow);
3961 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3968 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3969 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3970 tcg_temp_free_i32(t);
3972 t = tcg_const_i32(s->next_pc - s->pc);
3973 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3974 tcg_temp_free_i32(t);
3976 gen_exception(EXCP_SVC);
3977 return EXIT_NORETURN;
3980 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3982 gen_helper_tceb(cc_op, o->in1, o->in2);
3987 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3989 gen_helper_tcdb(cc_op, o->in1, o->in2);
3994 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3996 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
4001 #ifndef CONFIG_USER_ONLY
4002 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4004 potential_page_fault(s);
4005 gen_helper_tprot(cc_op, o->addr1, o->in2);
4011 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4013 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4014 potential_page_fault(s);
4015 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4016 tcg_temp_free_i32(l);
4021 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4023 potential_page_fault(s);
4024 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4025 return_low128(o->out2);
4030 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4032 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4033 potential_page_fault(s);
4034 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4035 tcg_temp_free_i32(l);
4040 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4042 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4043 potential_page_fault(s);
4044 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4045 tcg_temp_free_i32(l);
4049 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4051 int d1 = get_field(s->fields, d1);
4052 int d2 = get_field(s->fields, d2);
4053 int b1 = get_field(s->fields, b1);
4054 int b2 = get_field(s->fields, b2);
4055 int l = get_field(s->fields, l1);
4058 o->addr1 = get_address(s, 0, b1, d1);
4060 /* If the addresses are identical, this is a store/memset of zero. */
4061 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4062 o->in2 = tcg_const_i64(0);
4066 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4069 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4073 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4076 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4080 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4083 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4087 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4089 gen_op_movi_cc(s, 0);
4093 /* But in general we'll defer to a helper. */
4094 o->in2 = get_address(s, 0, b2, d2);
4095 t32 = tcg_const_i32(l);
4096 potential_page_fault(s);
4097 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4098 tcg_temp_free_i32(t32);
4103 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4105 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4109 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4111 int shift = s->insn->data & 0xff;
4112 int size = s->insn->data >> 8;
4113 uint64_t mask = ((1ull << size) - 1) << shift;
4116 tcg_gen_shli_i64(o->in2, o->in2, shift);
4117 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4119 /* Produce the CC from only the bits manipulated. */
4120 tcg_gen_andi_i64(cc_dst, o->out, mask);
4121 set_cc_nz_u64(s, cc_dst);
4125 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4127 o->out = tcg_const_i64(0);
4131 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4133 o->out = tcg_const_i64(0);
4139 /* ====================================================================== */
4140 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4141 the original inputs), update the various cc data structures in order to
4142 be able to compute the new condition code. */
4144 static void cout_abs32(DisasContext *s, DisasOps *o)
4146 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4149 static void cout_abs64(DisasContext *s, DisasOps *o)
4151 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4154 static void cout_adds32(DisasContext *s, DisasOps *o)
4156 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4159 static void cout_adds64(DisasContext *s, DisasOps *o)
4161 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4164 static void cout_addu32(DisasContext *s, DisasOps *o)
4166 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4169 static void cout_addu64(DisasContext *s, DisasOps *o)
4171 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4174 static void cout_addc32(DisasContext *s, DisasOps *o)
4176 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4179 static void cout_addc64(DisasContext *s, DisasOps *o)
4181 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4184 static void cout_cmps32(DisasContext *s, DisasOps *o)
4186 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4189 static void cout_cmps64(DisasContext *s, DisasOps *o)
4191 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4194 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4196 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4199 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4201 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4204 static void cout_f32(DisasContext *s, DisasOps *o)
4206 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4209 static void cout_f64(DisasContext *s, DisasOps *o)
4211 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4214 static void cout_f128(DisasContext *s, DisasOps *o)
4216 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4219 static void cout_nabs32(DisasContext *s, DisasOps *o)
4221 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4224 static void cout_nabs64(DisasContext *s, DisasOps *o)
4226 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4229 static void cout_neg32(DisasContext *s, DisasOps *o)
4231 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4234 static void cout_neg64(DisasContext *s, DisasOps *o)
4236 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4239 static void cout_nz32(DisasContext *s, DisasOps *o)
4241 tcg_gen_ext32u_i64(cc_dst, o->out);
4242 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4245 static void cout_nz64(DisasContext *s, DisasOps *o)
4247 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4250 static void cout_s32(DisasContext *s, DisasOps *o)
4252 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4255 static void cout_s64(DisasContext *s, DisasOps *o)
4257 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4260 static void cout_subs32(DisasContext *s, DisasOps *o)
4262 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4265 static void cout_subs64(DisasContext *s, DisasOps *o)
4267 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4270 static void cout_subu32(DisasContext *s, DisasOps *o)
4272 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4275 static void cout_subu64(DisasContext *s, DisasOps *o)
4277 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4280 static void cout_subb32(DisasContext *s, DisasOps *o)
4282 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4285 static void cout_subb64(DisasContext *s, DisasOps *o)
4287 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4290 static void cout_tm32(DisasContext *s, DisasOps *o)
4292 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4295 static void cout_tm64(DisasContext *s, DisasOps *o)
4297 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4300 /* ====================================================================== */
4301 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4302 with the TCG register to which we will write. Used in combination with
4303 the "wout" generators, in some cases we need a new temporary, and in
4304 some cases we can write to a TCG global. */
4306 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4308 o->out = tcg_temp_new_i64();
4310 #define SPEC_prep_new 0
4312 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4314 o->out = tcg_temp_new_i64();
4315 o->out2 = tcg_temp_new_i64();
4317 #define SPEC_prep_new_P 0
4319 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4321 o->out = regs[get_field(f, r1)];
4324 #define SPEC_prep_r1 0
4326 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4328 int r1 = get_field(f, r1);
4330 o->out2 = regs[r1 + 1];
4331 o->g_out = o->g_out2 = true;
4333 #define SPEC_prep_r1_P SPEC_r1_even
4335 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4337 o->out = fregs[get_field(f, r1)];
4340 #define SPEC_prep_f1 0
4342 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4344 int r1 = get_field(f, r1);
4346 o->out2 = fregs[r1 + 2];
4347 o->g_out = o->g_out2 = true;
4349 #define SPEC_prep_x1 SPEC_r1_f128
4351 /* ====================================================================== */
4352 /* The "Write OUTput" generators. These generally perform some non-trivial
4353 copy of data to TCG globals, or to main memory. The trivial cases are
4354 generally handled by having a "prep" generator install the TCG global
4355 as the destination of the operation. */
/* NOTE(review): this listing embeds original file line numbers and has
   blank/brace lines elided; the edits below add comments only. */

/* Write the full 64-bit result to general register r1. */
4357 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4359 store_reg(get_field(f, r1), o->out);
4361 #define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1; upper bits preserved. */
4363 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4365 int r1 = get_field(f, r1);
4366 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4368 #define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1; upper bits preserved. */
4370 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4372 int r1 = get_field(f, r1);
4373 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4375 #define SPEC_wout_r1_16 0

/* Write the result to the low 32 bits of r1 (high half preserved). */
4377 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4379 store_reg32_i64(get_field(f, r1), o->out);
4381 #define SPEC_wout_r1_32 0

/* Write the result to the high 32 bits of r1 (low half preserved). */
4383 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4385 store_reg32h_i64(get_field(f, r1), o->out);
4387 #define SPEC_wout_r1_32h 0

/* Write two 32-bit results to the even/odd register pair r1, r1 + 1. */
4389 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4391 int r1 = get_field(f, r1);
4392 store_reg32_i64(r1, o->out);
4393 store_reg32_i64(r1 + 1, o->out2);
4395 #define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the pair: low half to r1 + 1, high half
   to r1.  Note the shift clobbers o->out with the shifted value. */
4397 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4399 int r1 = get_field(f, r1);
4400 store_reg32_i64(r1 + 1, o->out);
4401 tcg_gen_shri_i64(o->out, o->out, 32);
4402 store_reg32_i64(r1, o->out);
4404 #define SPEC_wout_r1_D32 SPEC_r1_even

/* Write a 32-bit fp result to floating-point register r1. */
4406 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4408 store_freg32_i64(get_field(f, r1), o->out);
4410 #define SPEC_wout_e1 0

/* Write a 64-bit fp result to floating-point register r1. */
4412 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4414 store_freg(get_field(f, r1), o->out);
4416 #define SPEC_wout_f1 0

/* Write a 128-bit fp result to the register pair r1, r1 + 2. */
4418 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4420 int f1 = get_field(s->fields, r1);
4421 store_freg(f1, o->out);
4422 store_freg(f1 + 2, o->out2);
4424 #define SPEC_wout_x1 SPEC_r1_f128

/* Conditional 32-bit write: skip the store when r1 == r2 (the result
   would merely rewrite the source register). */
4426 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4428 if (get_field(f, r1) != get_field(f, r2)) {
4429 store_reg32_i64(get_field(f, r1), o->out);
4432 #define SPEC_wout_cond_r1r2_32 0

/* As above, for a 32-bit fp register. */
4434 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4436 if (get_field(f, r1) != get_field(f, r2)) {
4437 store_freg32_i64(get_field(f, r1), o->out);
4440 #define SPEC_wout_cond_e1e2 0

/* Memory writers: store the result at the previously computed addr1. */
4442 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4444 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4446 #define SPEC_wout_m1_8 0
4448 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4450 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4452 #define SPEC_wout_m1_16 0
4454 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4456 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4458 #define SPEC_wout_m1_32 0
4460 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4462 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4464 #define SPEC_wout_m1_64 0

/* Store 32 bits at the address held in in2 (not addr1). */
4466 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4468 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4470 #define SPEC_wout_m2_32 0

/* Store the result at addr1 and copy the previously loaded old value
   (in2) back to r1.  Reservation release is still a TODO (see XXX). */
4472 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4474 /* XXX release reservation */
4475 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4476 store_reg32_i64(get_field(f, r1), o->in2);
4478 #define SPEC_wout_m2_32_r1_atomic 0

/* 64-bit variant of the above. */
4480 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4482 /* XXX release reservation */
4483 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4484 store_reg(get_field(f, r1), o->in2);
4486 #define SPEC_wout_m2_64_r1_atomic 0
4488 /* ====================================================================== */
4489 /* The "INput 1" generators. These load the first operand to an insn. */

/* Load r1 into a fresh temporary. */
4491 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4493 o->in1 = load_reg(get_field(f, r1));
4495 #define SPEC_in1_r1 0

/* Alias the global register r1 directly, no copy.  NOTE(review): the
   line that marks it as a global (o->g_in1 = true) appears elided from
   this listing -- confirm against the full source. */
4497 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4499 o->in1 = regs[get_field(f, r1)];
4502 #define SPEC_in1_r1_o 0

/* Sign-extended low 32 bits of r1. */
4504 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4506 o->in1 = tcg_temp_new_i64();
4507 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4509 #define SPEC_in1_r1_32s 0

/* Zero-extended low 32 bits of r1. */
4511 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4513 o->in1 = tcg_temp_new_i64();
4514 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4516 #define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down to bit 0. */
4518 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4520 o->in1 = tcg_temp_new_i64();
4521 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4523 #define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair r1, r1 + 1. */
4525 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4527 o->in1 = load_reg(get_field(f, r1) + 1);
4529 #define SPEC_in1_r1p1 SPEC_r1_even

/* Sign-extended low 32 bits of r1 + 1. */
4531 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4533 o->in1 = tcg_temp_new_i64();
4534 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4536 #define SPEC_in1_r1p1_32s SPEC_r1_even

/* Zero-extended low 32 bits of r1 + 1. */
4538 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4540 o->in1 = tcg_temp_new_i64();
4541 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4543 #define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the pair: high half from r1, low half
   from r1 + 1 (tcg_gen_concat32 takes low then high). */
4545 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4547 int r1 = get_field(f, r1);
4548 o->in1 = tcg_temp_new_i64();
4549 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4551 #define SPEC_in1_r1_D32 SPEC_r1_even

/* r2 variants of the loaders above. */
4553 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4555 o->in1 = load_reg(get_field(f, r2));
4557 #define SPEC_in1_r2 0
4559 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4561 o->in1 = tcg_temp_new_i64();
4562 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4564 #define SPEC_in1_r2_sr32 0

/* r3 variants. */
4566 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4568 o->in1 = load_reg(get_field(f, r3));
4570 #define SPEC_in1_r3 0
4572 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4574 o->in1 = regs[get_field(f, r3)];
4577 #define SPEC_in1_r3_o 0
4579 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4581 o->in1 = tcg_temp_new_i64();
4582 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4584 #define SPEC_in1_r3_32s 0
4586 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4588 o->in1 = tcg_temp_new_i64();
4589 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4591 #define SPEC_in1_r3_32u 0
4593 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4595 int r3 = get_field(f, r3);
4596 o->in1 = tcg_temp_new_i64();
4597 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4599 #define SPEC_in1_r3_D32 SPEC_r3_even

/* Floating-point first operands: 32-bit, 64-bit global alias, and the
   128-bit pair.  NOTE(review): in1_x1_o populates out/out2 (the 128-bit
   ops use the out pair in place); the o->out = fregs[r1] line appears
   elided from this listing. */
4601 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4603 o->in1 = load_freg32_i64(get_field(f, r1));
4605 #define SPEC_in1_e1 0
4607 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4609 o->in1 = fregs[get_field(f, r1)];
4612 #define SPEC_in1_f1_o 0
4614 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4616 int r1 = get_field(f, r1);
4618 o->out2 = fregs[r1 + 2];
4619 o->g_out = o->g_out2 = true;
4621 #define SPEC_in1_x1_o SPEC_r1_f128
4623 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4625 o->in1 = fregs[get_field(f, r3)];
4628 #define SPEC_in1_f3_o 0

/* Effective-address generators: b1+d1, or x2+b2+d2. */
4630 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4632 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4634 #define SPEC_in1_la1 0
4636 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4638 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4639 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4641 #define SPEC_in1_la2 0

/* Memory first operands: load from addr1 with the given width/sign.
   NOTE(review): the in1_la1(s, f, o) call computing addr1 appears
   elided from each of these in this listing. */
4643 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4646 o->in1 = tcg_temp_new_i64();
4647 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4649 #define SPEC_in1_m1_8u 0
4651 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4654 o->in1 = tcg_temp_new_i64();
4655 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4657 #define SPEC_in1_m1_16s 0
4659 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4662 o->in1 = tcg_temp_new_i64();
4663 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4665 #define SPEC_in1_m1_16u 0
4667 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4670 o->in1 = tcg_temp_new_i64();
4671 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4673 #define SPEC_in1_m1_32s 0
4675 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4678 o->in1 = tcg_temp_new_i64();
4679 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4681 #define SPEC_in1_m1_32u 0
4683 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4686 o->in1 = tcg_temp_new_i64();
4687 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4689 #define SPEC_in1_m1_64 0
4691 /* ====================================================================== */
4692 /* The "INput 2" generators. These load the second operand to an insn. */

/* Alias the global r1 directly as the second operand. */
4694 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4696 o->in2 = regs[get_field(f, r1)];
4699 #define SPEC_in2_r1_o 0

/* Zero-extended low 16 / 32 bits of r1. */
4701 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4703 o->in2 = tcg_temp_new_i64();
4704 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4706 #define SPEC_in2_r1_16u 0
4708 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4710 o->in2 = tcg_temp_new_i64();
4711 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4713 #define SPEC_in2_r1_32u 0

/* 64-bit value from the even/odd pair: high from r1, low from r1 + 1. */
4715 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4717 int r1 = get_field(f, r1);
4718 o->in2 = tcg_temp_new_i64();
4719 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4721 #define SPEC_in2_r1_D32 SPEC_r1_even

/* r2 loaders: copy, global alias, and "nonzero only" (used where r2 == 0
   means "no operand"; NOTE(review): the r2 != 0 guard appears elided
   from this listing). */
4723 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4725 o->in2 = load_reg(get_field(f, r2));
4727 #define SPEC_in2_r2 0
4729 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4731 o->in2 = regs[get_field(f, r2)];
4734 #define SPEC_in2_r2_o 0
4736 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4738 int r2 = get_field(f, r2);
4740 o->in2 = load_reg(r2);
4743 #define SPEC_in2_r2_nz 0

/* Sign/zero-extended narrow views of r2. */
4745 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4747 o->in2 = tcg_temp_new_i64();
4748 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4750 #define SPEC_in2_r2_8s 0
4752 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4754 o->in2 = tcg_temp_new_i64();
4755 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4757 #define SPEC_in2_r2_8u 0
4759 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4761 o->in2 = tcg_temp_new_i64();
4762 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4764 #define SPEC_in2_r2_16s 0
4766 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4768 o->in2 = tcg_temp_new_i64();
4769 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4771 #define SPEC_in2_r2_16u 0

/* r3 loaders. */
4773 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4775 o->in2 = load_reg(get_field(f, r3));
4777 #define SPEC_in2_r3 0
4779 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4781 o->in2 = tcg_temp_new_i64();
4782 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4784 #define SPEC_in2_r3_sr32 0

/* 32-bit signed/unsigned and high-half views of r2. */
4786 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4788 o->in2 = tcg_temp_new_i64();
4789 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4791 #define SPEC_in2_r2_32s 0
4793 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4795 o->in2 = tcg_temp_new_i64();
4796 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4798 #define SPEC_in2_r2_32u 0
4800 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4802 o->in2 = tcg_temp_new_i64();
4803 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4805 #define SPEC_in2_r2_sr32 0

/* Floating-point second operands: 32-bit, 64-bit global alias, and the
   128-bit pair aliased into in1/in2.  NOTE(review): the o->in1 =
   fregs[r2] line of in2_x2_o appears elided from this listing. */
4807 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4809 o->in2 = load_freg32_i64(get_field(f, r2));
4811 #define SPEC_in2_e2 0
4813 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4815 o->in2 = fregs[get_field(f, r2)];
4818 #define SPEC_in2_f2_o 0
4820 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4822 int r2 = get_field(f, r2);
4824 o->in2 = fregs[r2 + 2];
4825 o->g_in1 = o->g_in2 = true;
4827 #define SPEC_in2_x2_o SPEC_r2_f128

/* Effective addresses delivered in in2: register-as-address (r2 used as
   base, no index/displacement) and the full x2+b2+d2 form. */
4829 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4831 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4833 #define SPEC_in2_ra2 0
4835 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4837 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4838 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4840 #define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from this insn. */
4842 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4844 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4846 #define SPEC_in2_ri2 0

/* Shift amounts, masked to 5 or 6 bits respectively. */
4848 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4850 help_l2_shift(s, f, o, 31);
4852 #define SPEC_in2_sh32 0
4854 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4856 help_l2_shift(s, f, o, 63);
4858 #define SPEC_in2_sh64 0

/* Memory second operands: in2 holds the effective address (computed by
   an in2_a2 call that appears elided from this listing) and is
   overwritten in place with the loaded value. */
4860 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4863 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4865 #define SPEC_in2_m2_8u 0
4867 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4870 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4872 #define SPEC_in2_m2_16s 0
4874 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4877 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4879 #define SPEC_in2_m2_16u 0
4881 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4884 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4886 #define SPEC_in2_m2_32s 0
4888 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4891 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4893 #define SPEC_in2_m2_32u 0
4895 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4898 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4900 #define SPEC_in2_m2_64 0

/* PC-relative memory operands (address from an elided in2_ri2 call). */
4902 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4905 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4907 #define SPEC_in2_mri2_16u 0
4909 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4912 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4914 #define SPEC_in2_mri2_32s 0
4916 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4919 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4921 #define SPEC_in2_mri2_32u 0
4923 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4926 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4928 #define SPEC_in2_mri2_64 0

/* "Atomic" loads from addr1: reservation acquisition is still a TODO. */
4930 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4932 /* XXX should reserve the address */
4934 o->in2 = tcg_temp_new_i64();
4935 tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4937 #define SPEC_in2_m2_32s_atomic 0
4939 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4941 /* XXX should reserve the address */
4943 o->in2 = tcg_temp_new_i64();
4944 tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4946 #define SPEC_in2_m2_64_atomic 0

/* Immediate second operands: as-is, or truncated to 8/16/32 bits. */
4948 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4950 o->in2 = tcg_const_i64(get_field(f, i2));
4952 #define SPEC_in2_i2 0
4954 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4956 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4958 #define SPEC_in2_i2_8u 0
4960 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4962 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4964 #define SPEC_in2_i2_16u 0
4966 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4968 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4970 #define SPEC_in2_i2_32u 0

/* Immediates pre-shifted left by the per-insn data amount (used by the
   insert/or-immediate family that targets a halfword/word position). */
4972 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4974 uint64_t i2 = (uint16_t)get_field(f, i2);
4975 o->in2 = tcg_const_i64(i2 << s->insn->data);
4977 #define SPEC_in2_i2_16u_shl 0
4979 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4981 uint64_t i2 = (uint32_t)get_field(f, i2);
4982 o->in2 = tcg_const_i64(i2 << s->insn->data);
4984 #define SPEC_in2_i2_32u_shl 0

/* System-emulation only: the raw instruction bytes themselves as the
   operand.  NOTE(review): the matching #endif appears elided here. */
4986 #ifndef CONFIG_USER_ONLY
4987 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
4989 o->in2 = tcg_const_i64(s->fields->raw_insn);
4991 #define SPEC_in2_insn 0
4994 /* ====================================================================== */

4996 /* Find opc within the table of insns. This is formulated as a switch
4997 statement so that (1) we get compile-time notice of cut-paste errors
4998 for duplicated opcodes, and (2) the compiler generates the binary
4999 search tree, rather than us having to post-process the table. */

/* C() is the common case of D() with a zero data value. */
5001 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5002 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enum constant per insn. */
5004 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5006 enum DisasInsnEnum {
5007 #include "insn-data.def"

/* Second expansion: build the DisasInsn descriptor for each insn,
   OR-ing together the SPEC_ constraint bits of its operand helpers
   and wiring up the in1/in2/prep/wout/cout/op helper callbacks. */
5011 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5015 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5017 .help_in1 = in1_##I1, \
5018 .help_in2 = in2_##I2, \
5019 .help_prep = prep_##P, \
5020 .help_wout = wout_##W, \
5021 .help_cout = cout_##CC, \
5022 .help_op = op_##OP, \

5026 /* Allow 0 to be used for NULL in the table below. */
5034 #define SPEC_in1_0 0
5035 #define SPEC_in2_0 0
5036 #define SPEC_prep_0 0
5037 #define SPEC_wout_0 0

5039 static const DisasInsn insn_info[] = {
5040 #include "insn-data.def"

/* Third expansion: switch cases mapping opcode -> table entry.
   NOTE(review): the switch statement scaffolding of lookup_opc appears
   elided from this listing. */
5044 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5045 case OPC: return &insn_info[insn_ ## NM];

5047 static const DisasInsn *lookup_opc(uint16_t opc)
5050 #include "insn-data.def"
5059 /* Extract a field from the insn. The INSN should be left-aligned in
5060 the uint64_t so that we can more easily utilize the big-bit-endian
5061 definitions we extract from the Principles of Operation. */

5063 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5071 /* Zero extract the field from the insn. */
5072 r = (insn << f->beg) >> (64 - f->size);

/* For type 1, m is the sign bit; the (elided) (r ^ m) - m step performs
   the sign extension.  Type 2 re-joins the DL (12 bit) and DH (8 bit)
   halves of a long-displacement into a signed 20-bit value. */
5074 /* Sign-extend, or un-swap the field as necessary. */
5076 case 0: /* unsigned */
5078 case 1: /* signed */
5079 assert(f->size <= 32);
5080 m = 1u << (f->size - 1);
5083 case 2: /* dl+dh split, signed 20 bit. */
5084 r = ((int8_t)r << 12) | (r >> 8);

5090 /* Validate that the "compressed" encoding we selected above is valid.
5091 I.e. we haven't made two different original fields overlap. */
5092 assert(((o->presentC >> f->indexC) & 1) == 0);
5093 o->presentC |= 1 << f->indexC;
5094 o->presentO |= 1 << f->indexO;

5096 o->c[f->indexC] = r;
5099 /* Lookup the insn at the current PC, extracting the operands into O and
5100 returning the info struct for the insn. Returns NULL for invalid insn. */

5102 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5105 uint64_t insn, pc = s->pc;
5107 const DisasInsn *info;

/* The first two bytes are enough to determine the insn length (the
   top two bits of the first opcode byte encode 2/4/6 bytes). */
5109 insn = ld_code2(env, pc);
5110 op = (insn >> 8) & 0xff;
5111 ilen = get_ilen(op);
5112 s->next_pc = s->pc + ilen;

/* Fetch the remaining bytes, left-aligning the insn in the uint64_t
   as extract_field expects. */
5119 insn = ld_code4(env, pc) << 32;
5122 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);

5128 /* We can't actually determine the insn format until we've looked up
5129 the full insn opcode. Which we can't do without locating the
5130 secondary opcode. Assume by default that OP2 is at bit 40; for
5131 those smaller insns that don't actually have a secondary opcode
5132 this will correctly result in OP2 = 0. */
5138 case 0xb2: /* S, RRF, RRE */
5139 case 0xb3: /* RRE, RRD, RRF */
5140 case 0xb9: /* RRE, RRF */
5141 case 0xe5: /* SSE, SIL */
/* Secondary opcode in the second byte. */
5142 op2 = (insn << 8) >> 56;
5146 case 0xc0: /* RIL */
5147 case 0xc2: /* RIL */
5148 case 0xc4: /* RIL */
5149 case 0xc6: /* RIL */
5150 case 0xc8: /* SSF */
5151 case 0xcc: /* RIL */
/* Secondary opcode in the low nibble of the second byte. */
5152 op2 = (insn << 12) >> 60;
5154 case 0xd0 ... 0xdf: /* SS */
5160 case 0xee ... 0xf3: /* SS */
5161 case 0xf8 ... 0xfd: /* SS */
/* Default: secondary opcode at bit 40 (byte 6 of a 6-byte insn). */
5165 op2 = (insn << 40) >> 56;

5169 memset(f, 0, sizeof(*f));

5174 /* Lookup the instruction. */
5175 info = lookup_opc(op << 8 | op2);

5177 /* If we found it, extract the operands. */
5179 DisasFormat fmt = info->fmt;
5182 for (i = 0; i < NUM_C_FIELD; ++i) {
5183 extract_field(f, &format_info[fmt].op[i], insn);
/* Decode and translate a single instruction at s->pc, driving the
   in1/in2/prep/op/wout/cout helper pipeline from the insn descriptor.
   Returns the exit status that controls the main translation loop. */
5189 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5191 const DisasInsn *insn;
5192 ExitStatus ret = NO_EXIT;

5196 /* Search for the insn in the table. */
5197 insn = extract_insn(env, s, &f);

5199 /* Not found means unimplemented/illegal opcode. */
5201 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5203 gen_illegal_opcode(s);
5204 return EXIT_NORETURN;

/* With PER tracing active, report each instruction fetch. */
5207 #ifndef CONFIG_USER_ONLY
5208 if (s->tb->flags & FLAG_MASK_PER) {
5209 TCGv_i64 addr = tcg_const_i64(s->pc);
5210 gen_helper_per_ifetch(cpu_env, addr);
5211 tcg_temp_free_i64(addr);

/* Raise a specification exception when the descriptor requires an even
   register pair (r1/r2/r3) or a valid 128-bit fp register pair and the
   encoded register number does not satisfy it.  NOTE(review): the
   odd/invalid checks between get_field and the excp assignments appear
   elided from this listing. */
5215 /* Check for insn specification exceptions. */
5217 int spec = insn->spec, excp = 0, r;
5219 if (spec & SPEC_r1_even) {
5220 r = get_field(&f, r1);
5222 excp = PGM_SPECIFICATION;
5225 if (spec & SPEC_r2_even) {
5226 r = get_field(&f, r2);
5228 excp = PGM_SPECIFICATION;
5231 if (spec & SPEC_r3_even) {
5232 r = get_field(&f, r3);
5234 excp = PGM_SPECIFICATION;
5237 if (spec & SPEC_r1_f128) {
5238 r = get_field(&f, r1);
5240 excp = PGM_SPECIFICATION;
5243 if (spec & SPEC_r2_f128) {
5244 r = get_field(&f, r2);
5246 excp = PGM_SPECIFICATION;
5250 gen_program_exception(s, excp);
5251 return EXIT_NORETURN;

5255 /* Set up the structures we use to communicate with the helpers. */
5258 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5259 TCGV_UNUSED_I64(o.out);
5260 TCGV_UNUSED_I64(o.out2);
5261 TCGV_UNUSED_I64(o.in1);
5262 TCGV_UNUSED_I64(o.in2);
5263 TCGV_UNUSED_I64(o.addr1);

/* Each helper slot is optional; run them in fixed pipeline order. */
5265 /* Implement the instruction. */
5266 if (insn->help_in1) {
5267 insn->help_in1(s, &f, &o);
5269 if (insn->help_in2) {
5270 insn->help_in2(s, &f, &o);
5272 if (insn->help_prep) {
5273 insn->help_prep(s, &f, &o);
5275 if (insn->help_op) {
5276 ret = insn->help_op(s, &o);
5278 if (insn->help_wout) {
5279 insn->help_wout(s, &f, &o);
5281 if (insn->help_cout) {
5282 insn->help_cout(s, &o);

/* The g_* flags mark TCG globals that must not be freed. */
5285 /* Free any temporaries created by the helpers. */
5286 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5287 tcg_temp_free_i64(o.out);
5289 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5290 tcg_temp_free_i64(o.out2);
5292 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5293 tcg_temp_free_i64(o.in1);
5295 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5296 tcg_temp_free_i64(o.in2);
5298 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5299 tcg_temp_free_i64(o.addr1);

5302 #ifndef CONFIG_USER_ONLY
5303 if (s->tb->flags & FLAG_MASK_PER) {
5304 /* An exception might be triggered, save PSW if not already done. */
5305 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5306 tcg_gen_movi_i64(psw_addr, s->next_pc);

5312 /* Call the helper to check for a possible PER exception. */
5313 gen_helper_per_check_exception(cpu_env);

5317 /* Advance to the next instruction. */
/* Main translation loop: decode instructions from tb->pc until an exit
   condition (branch, page boundary, op buffer full, icount limit, or
   single-stepping), emitting TCG ops and optional search-pc metadata. */
5322 static inline void gen_intermediate_code_internal(S390CPU *cpu,
5323 TranslationBlock *tb,
5326 CPUState *cs = CPU(cpu);
5327 CPUS390XState *env = &cpu->env;
5329 target_ulong pc_start;
5330 uint64_t next_page_start;
5332 int num_insns, max_insns;

/* In 31/24-bit modes the high address bit(s) are not part of the PC. */
5340 if (!(tb->flags & FLAG_MASK_64)) {
5341 pc_start &= 0x7fffffff;

5346 dc.cc_op = CC_OP_DYNAMIC;
5347 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

/* Never translate across a target page boundary. */
5349 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

5352 max_insns = tb->cflags & CF_COUNT_MASK;
5353 if (max_insns == 0) {
5354 max_insns = CF_COUNT_MASK;

/* search_pc mode: record per-op PC, cc_op and icount so the PSW can be
   reconstructed from a host PC (see restore_state_to_opc). */
5361 j = tcg_op_buf_count();
5365 tcg_ctx.gen_opc_instr_start[lj++] = 0;
5368 tcg_ctx.gen_opc_pc[lj] = dc.pc;
5369 gen_opc_cc_op[lj] = dc.cc_op;
5370 tcg_ctx.gen_opc_instr_start[lj] = 1;
5371 tcg_ctx.gen_opc_icount[lj] = num_insns;

/* Start the io region before the last insn of an I/O-ending TB. */
5373 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {

5377 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5378 tcg_gen_debug_insn_start(dc.pc);

/* Stop at breakpoints so the debug exception fires at this PC. */
5382 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
5383 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
5384 if (bp->pc == dc.pc) {
5385 status = EXIT_PC_STALE;

5391 if (status == NO_EXIT) {
5392 status = translate_one(env, &dc);

5395 /* If we reach a page boundary, are single stepping,
5396 or exhaust instruction count, stop generation. */
5397 if (status == NO_EXIT
5398 && (dc.pc >= next_page_start
5399 || tcg_op_buf_full()
5400 || num_insns >= max_insns
5402 || cs->singlestep_enabled)) {
5403 status = EXIT_PC_STALE;
5405 } while (status == NO_EXIT);

5407 if (tb->cflags & CF_LAST_IO) {

/* Epilogue: flush the PSW for stale-PC exits and emit the TB end.
   NOTE(review): the switch over `status` is partially elided here. */
5416 update_psw_addr(&dc);
5418 case EXIT_PC_UPDATED:
5419 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5420 cc op type is in env */
5422 /* Exit the TB, either by raising a debug exception or by return. */
5424 gen_exception(EXCP_DEBUG);

5433 gen_tb_end(tb, num_insns);

/* search_pc mode: pad the metadata arrays out to the op count. */
5436 j = tcg_op_buf_count();
5439 tcg_ctx.gen_opc_instr_start[lj++] = 0;

5442 tb->size = dc.pc - pc_start;
5443 tb->icount = num_insns;

5446 #if defined(S390X_DEBUG_DISAS)
5447 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5448 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5449 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
/* Public entry point: translate a TB without search-pc metadata. */
5455 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
5457 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);

/* Public entry point: translate a TB recording search-pc metadata. */
5460 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
5462 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
5465 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
5468 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
5469 cc_op = gen_opc_cc_op[pc_pos];
5470 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {