#include "tcg-be-ldst.h"
-#ifndef NDEBUG
+#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
"%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
}
if (opc & P_DATA16) {
/* We should never be asking for both 16 and 64-bit operation. */
- assert((opc & P_REXW) == 0);
+ tcg_debug_assert((opc & P_REXW) == 0);
tcg_out8(s, 0x66);
}
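/* (Why these can't mix: the 0x66 operand-size prefix and REX.W
   conflict, and the CPU resolves that conflict by honouring REX.W
   and ignoring 0x66, so emitting both would silently turn an
   intended 16-bit operation into a 64-bit one.) */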
if (opc & P_ADDR32) {
if (index < 0) {
index = 4;
} else {
- assert(index != TCG_REG_ESP);
+ tcg_debug_assert(index != TCG_REG_ESP);
}
tcg_out_opc(s, opc, r, rm, index);
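The index = 4 fallback and the assert against TCG_REG_ESP above both come from x86 instruction encoding: in a SIB byte, the index field value 4 (the encoding of %esp) means "no index register", so %esp can never serve as an index, and 4 is exactly the value to emit when no index is wanted. A minimal illustrative sketch of the byte packing (helper names are hypothetical, not QEMU's):

    #include <stdint.h>

    /* Illustrative x86 ModRM/SIB packing, not QEMU code. */
    static uint8_t modrm_byte(int mod, int reg, int rm)
    {
        return (mod << 6) | ((reg & 7) << 3) | (rm & 7);
    }

    static uint8_t sib_byte(int scale, int index, int base)
    {
        /* index == 4 (%esp) is reserved to mean "no index", which is
           why the code above substitutes 4 for a missing index and
           asserts that a real index is never TCG_REG_ESP. */
        return (scale << 6) | ((index & 7) << 3) | (base & 7);
    }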
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
/* movzbl */
- assert(src < 4 || TCG_TARGET_REG_BITS == 64);
+ tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}
static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
/* movsbl */
- assert(src < 4 || TCG_TARGET_REG_BITS == 64);
+ tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}
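The src < 4 || TCG_TARGET_REG_BITS == 64 assertions in tcg_out_ext8u and tcg_out_ext8s reflect an ISA restriction: in 32-bit mode only registers 0-3 (%eax..%ebx) have byte subregisters, while in 64-bit mode every register's low byte is reachable but %spl/%bpl/%sil/%dil need a REX prefix, which is what the P_REXB_RM flag forces. A sketch of the rule (register numbers follow the standard x86 encoding; not QEMU code):

    /* Encodings 0-3 are %al/%cl/%dl/%bl and always work; without REX,
       encodings 4-7 mean %ah/%ch/%dh/%bh, and only a (possibly empty)
       REX prefix re-maps them to %spl/%bpl/%sil/%dil. For example:
           movzbl %al,  %eax  ->  0f b6 c0
           movzbl %sil, %esi  ->  40 0f b6 f6   (0x40 is an empty REX) */
    static int byte_form_needs_rex(int reg)
    {
        return reg >= 4;
    }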
First argument register is clobbered. */
static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
- int mem_index, TCGMemOp s_bits,
+ int mem_index, TCGMemOp opc,
tcg_insn_unit **label_ptr, int which)
{
const TCGReg r0 = TCG_REG_L0;
const TCGReg r1 = TCG_REG_L1;
TCGType ttype = TCG_TYPE_I32;
- TCGType htype = TCG_TYPE_I32;
- int trexw = 0, hrexw = 0;
+ TCGType tlbtype = TCG_TYPE_I32;
+ int trexw = 0, hrexw = 0, tlbrexw = 0;
+ int s_mask = (1 << (opc & MO_SIZE)) - 1;
+ bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
if (TCG_TARGET_REG_BITS == 64) {
if (TARGET_LONG_BITS == 64) {
ttype = TCG_TYPE_I64;
trexw = P_REXW;
}
if (TCG_TYPE_PTR == TCG_TYPE_I64) {
- htype = TCG_TYPE_I64;
hrexw = P_REXW;
+ if (TARGET_PAGE_BITS + CPU_TLB_BITS > 32) {
+ tlbtype = TCG_TYPE_I64;
+ tlbrexw = P_REXW;
+ }
}
}
- tcg_out_mov(s, htype, r0, addrlo);
- tcg_out_mov(s, ttype, r1, addrlo);
+ tcg_out_mov(s, tlbtype, r0, addrlo);
+ if (aligned) {
+ tcg_out_mov(s, ttype, r1, addrlo);
+ } else {
+ /* For an unaligned access, check that we don't cross pages by
+ using the page address of the last byte. */
+ tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
+ }
- tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
+ tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tgen_arithi(s, ARITH_AND + trexw, r1,
- TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
- tgen_arithi(s, ARITH_AND + hrexw, r0,
+ TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
+ tgen_arithi(s, ARITH_AND + tlbrexw, r0,
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
offsetof(CPUArchState, tlb_table[mem_index][0]) + which);
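The LEA/AND pair above means that for an unaligned access the TLB tag is compared against the page of the access's *last* byte: an access that straddles a page boundary can then never match the entry selected by its first byte, so it falls through to the slow path. A standalone sketch of the arithmetic, assuming 4 KiB pages (values are illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK (~(uint64_t)0xfff)    /* TARGET_PAGE_BITS == 12 */

    int main(void)
    {
        uint64_t addr = 0xfffe;      /* 4-byte load spanning 0xfffe..0x10001 */
        int s_mask = (1 << 2) - 1;   /* (1 << MO_32) - 1 */

        uint64_t first = addr & PAGE_MASK;             /* 0xf000  */
        uint64_t last  = (addr + s_mask) & PAGE_MASK;  /* 0x10000 */

        /* The TLB entry is indexed by the first byte's page but compared
           against the last byte's page, so the compare fails and the
           access takes the slow path. */
        printf("first %#llx last %#llx -> %s\n",
               (unsigned long long)first, (unsigned long long)last,
               first == last ? "fast path" : "slow path");
        return 0;
    }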
static int guest_base_flags;
static inline void setup_guest_base_seg(void)
{
- if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
+ if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
guest_base_flags = P_GS;
}
}
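arch_prctl(ARCH_SET_GS, guest_base) points the %gs segment base at guest_base, after which every %gs-prefixed access (the P_GS / 0x65 prefix) adds guest_base implicitly, with no extra register or add instruction. A minimal standalone sketch of the same mechanism on x86-64 Linux (not QEMU code; error handling trimmed):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <asm/prctl.h>

    int main(void)
    {
        static uint64_t buf[1] = { 0x1234 };

        /* Older glibc has no arch_prctl wrapper, so use syscall(2). */
        if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)buf) != 0) {
            return 1;
        }

        uint64_t val;
        /* "%gs:0" now reads buf[0]: the GS base is added by the CPU. */
        asm volatile("movq %%gs:0, %0" : "=r"(val));
        printf("%#llx\n", (unsigned long long)val);    /* prints 0x1234 */
        return 0;
    }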
TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
int mem_index;
- TCGMemOp s_bits;
tcg_insn_unit *label_ptr[2];
#endif
#if defined(CONFIG_SOFTMMU)
mem_index = get_mmuidx(oi);
- s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
label_ptr, offsetof(CPUTLBEntry, addr_read));
/* TLB Hit. */
s->code_ptr, label_ptr);
#else
{
- int32_t offset = GUEST_BASE;
+ int32_t offset = guest_base;
TCGReg base = addrlo;
int index = -1;
int seg = 0;
We can do this with the ADDR32 prefix if we're not using
a guest base, or when using segmentation. Otherwise we
need to zero-extend manually. */
- if (GUEST_BASE == 0 || guest_base_flags) {
+ if (guest_base == 0 || guest_base_flags) {
seg = guest_base_flags;
offset = 0;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
tcg_out_ext32u(s, TCG_REG_L0, base);
base = TCG_REG_L0;
}
- if (offset != GUEST_BASE) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
+ if (offset != guest_base) {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
index = TCG_REG_L1;
offset = 0;
}
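The offset != guest_base test above works because offset was initialized as (int32_t)guest_base: if truncation to a signed 32-bit displacement did not round-trip, the base cannot be folded into an x86 addressing mode and has to be materialized in TCG_REG_L1 instead. A tiny sketch of the idiom (function name and values are illustrative):

    #include <stdint.h>

    /* Sketch of the "does guest_base fit in a disp32?" idiom. */
    static int fits_in_disp32(uint64_t base)
    {
        int32_t offset = base;           /* same truncation as the patch */
        return offset == (int64_t)base;  /* disp32 is sign-extended */
    }
    /* fits_in_disp32(0x7fffffff) == 1, fits_in_disp32(0x100000000) == 0 */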
TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
int mem_index;
- TCGMemOp s_bits;
tcg_insn_unit *label_ptr[2];
#endif
#if defined(CONFIG_SOFTMMU)
mem_index = get_mmuidx(oi);
- s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
label_ptr, offsetof(CPUTLBEntry, addr_write));
/* TLB Hit. */
s->code_ptr, label_ptr);
#else
{
- int32_t offset = GUEST_BASE;
+ int32_t offset = guest_base;
TCGReg base = addrlo;
int seg = 0;
/* See comment in tcg_out_qemu_ld re zero-extension of addrlo. */
- if (GUEST_BASE == 0 || guest_base_flags) {
+ if (guest_base == 0 || guest_base_flags) {
seg = guest_base_flags;
offset = 0;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
} else if (TCG_TARGET_REG_BITS == 64) {
/* ??? Note that we can't use the same SIB addressing scheme
as for loads, since we require L0 free for bswap. */
- if (offset != GUEST_BASE) {
+ if (offset != guest_base) {
if (TARGET_LONG_BITS == 32) {
tcg_out_ext32u(s, TCG_REG_L0, base);
base = TCG_REG_L0;
}
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
base = TCG_REG_L1;
offset = 0;
case INDEX_op_bswap64_i64:
tcg_out_bswap64(s, args[0]);
break;
+ case INDEX_op_extu_i32_i64:
case INDEX_op_ext32u_i64:
tcg_out_ext32u(s, args[0], args[1]);
break;
+ case INDEX_op_ext_i32_i64:
case INDEX_op_ext32s_i64:
tcg_out_ext32s(s, args[0], args[1]);
break;
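The new INDEX_op_ext_i32_i64 / INDEX_op_extu_i32_i64 opcodes make 32-to-64-bit widening explicit, and on this backend they simply fall through to the existing ext32s/ext32u emitters: a 32-bit mov already zero-extends the destination's upper half, while sign extension uses movslq. The C equivalents (a compiler sketch, not QEMU code):

    #include <stdint.h>

    /* extu_i32_i64: compiles to a single movl, whose implicit
       zero-extension clears the upper 32 bits. */
    uint64_t extu_i32_i64(uint32_t x) { return x; }

    /* ext_i32_i64: compiles to movslq, the sign-extending move. */
    int64_t ext_i32_i64(int32_t x) { return x; }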
{ INDEX_op_ext16u_i64, { "r", "r" } },
{ INDEX_op_ext32u_i64, { "r", "r" } },
+ { INDEX_op_ext_i32_i64, { "r", "r" } },
+ { INDEX_op_extu_i32_i64, { "r", "r" } },
+
{ INDEX_op_deposit_i64, { "Q", "0", "Q" } },
{ INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
tcg_out_opc(s, OPC_RET, 0, 0, 0);
#if !defined(CONFIG_SOFTMMU)
- /* Try to set up a segment register to point to GUEST_BASE. */
- if (GUEST_BASE) {
+ /* Try to set up a segment register to point to guest_base. */
+ if (guest_base) {
setup_guest_base_seg();
}
#endif