From f75368cd0f337bc1e533e1db7e4c501d3487a983 Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Tue, 19 Nov 2019 00:10:55 -0500
Subject: [PATCH] tcg: TCGMemOp is now accelerator independent MemOp

Preparation for collapsing the two byte swaps, adjust_endianness and
handle_bswap, along the I/O path.

Target dependent attributes are conditionalized upon NEED_CPU_H.

Backports commit 14776ab5a12972ea439c7fb2203a4c15a09094b4 from qemu
---
 qemu/accel/tcg/cputlb.c | 2 +-
 qemu/include/exec/memop.h | 110 ++++++++++++++++
 qemu/target/arm/translate-a64.c | 48 +++----
 qemu/target/arm/translate-a64.h | 2 +-
 qemu/target/arm/translate-sve.c | 2 +-
 qemu/target/arm/translate.c | 30 ++---
 qemu/target/arm/translate.h | 2 +-
 qemu/target/i386/translate.c | 132 +++++++++----------
 qemu/target/m68k/translate.c | 2 +-
 qemu/target/mips/translate.c | 10 +-
 qemu/target/riscv/insn_trans/trans_rva.inc.c | 8 +-
 qemu/target/riscv/insn_trans/trans_rvi.inc.c | 4 +-
 qemu/target/sparc/translate.c | 14 +-
 qemu/tcg/README | 2 +-
 qemu/tcg/aarch64/tcg-target.inc.c | 26 ++--
 qemu/tcg/arm/tcg-target.inc.c | 26 ++--
 qemu/tcg/i386/tcg-target.inc.c | 24 ++--
 qemu/tcg/mips/tcg-target.inc.c | 16 +--
 qemu/tcg/optimize.c | 2 +-
 qemu/tcg/ppc/tcg-target.inc.c | 12 +-
 qemu/tcg/s390/tcg-target.inc.c | 14 +-
 qemu/tcg/sparc/tcg-target.inc.c | 6 +-
 qemu/tcg/tcg-op.c | 38 +++---
 qemu/tcg/tcg-op.h | 80 +++++------
 qemu/tcg/tcg.c | 2 +-
 qemu/tcg/tcg.h | 101 +------------
 26 files changed, 368 insertions(+), 347 deletions(-)
 create mode 100644 qemu/include/exec/memop.h

diff --git a/qemu/accel/tcg/cputlb.c b/qemu/accel/tcg/cputlb.c
index 2f4398c1..80e3d2e2 100644
--- a/qemu/accel/tcg/cputlb.c
+++ b/qemu/accel/tcg/cputlb.c
@@ -772,7 +772,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
     target_ulong tlb_addr = tlb_addr_write(tlbe);
-    TCGMemOp mop = get_memop(oi);
+    MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     int s_bits = mop & MO_SIZE;
 
diff --git a/qemu/include/exec/memop.h b/qemu/include/exec/memop.h
new file mode 100644
index 00000000..7262ca3d
--- /dev/null
+++ b/qemu/include/exec/memop.h
@@ -0,0 +1,110 @@
+/*
+ * Constants for memory operations
+ *
+ * Authors:
+ *  Richard Henderson
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMOP_H
+#define MEMOP_H
+
+typedef enum MemOp {
+    MO_8     = 0,
+    MO_16    = 1,
+    MO_32    = 2,
+    MO_64    = 3,
+    MO_SIZE  = 3,   /* Mask for the above.  */
+
+    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */
+
+    MO_BSWAP = 8,   /* Host reverse endian.  */
+#ifdef HOST_WORDS_BIGENDIAN
+    MO_LE    = MO_BSWAP,
+    MO_BE    = 0,
+#else
+    MO_LE    = 0,
+    MO_BE    = MO_BSWAP,
+#endif
+#ifdef NEED_CPU_H
+#ifdef TARGET_WORDS_BIGENDIAN
+    MO_TE    = MO_BE,
+#else
+    MO_TE    = MO_LE,
+#endif
+#endif
+
+    /*
+     * MO_UNALN accesses are never checked for alignment.
+     * MO_ALIGN accesses will result in a call to the CPU's
+     * do_unaligned_access hook if the guest address is not aligned.
+     * The default depends on whether the target CPU defines
+     * TARGET_ALIGNED_ONLY.
+     *
+     * Some architectures (e.g. ARMv8) need the address which is aligned
+     * to a size more than the size of the memory access.
+     * Some architectures (e.g. SPARCv9) need an address which is aligned,
+     * but less strictly than the natural alignment.
+     *
+     * MO_ALIGN supposes the alignment size is the size of a memory access.
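+     * For example (an illustrative reading of the constants above):
+     * MO_32 | MO_ALIGN requires a 4-byte-aligned address, while
+     * MO_32 | MO_ALIGN_8 requires the same 4-byte access to be
+     * 8-byte aligned.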
+ * + * There are three options: + * - unaligned access permitted (MO_UNALN). + * - an alignment to the size of an access (MO_ALIGN); + * - an alignment to a specified size, which may be more or less than + * the access size (MO_ALIGN_x where 'x' is a size in bytes); + */ + MO_ASHIFT = 4, + MO_AMASK = 7 << MO_ASHIFT, +#ifdef NEED_CPU_H +#ifdef TARGET_ALIGNED_ONLY + MO_ALIGN = 0, + MO_UNALN = MO_AMASK, +#else + MO_ALIGN = MO_AMASK, + MO_UNALN = 0, +#endif +#endif + MO_ALIGN_2 = 1 << MO_ASHIFT, + MO_ALIGN_4 = 2 << MO_ASHIFT, + MO_ALIGN_8 = 3 << MO_ASHIFT, + MO_ALIGN_16 = 4 << MO_ASHIFT, + MO_ALIGN_32 = 5 << MO_ASHIFT, + MO_ALIGN_64 = 6 << MO_ASHIFT, + + /* Combinations of the above, for ease of use. */ + MO_UB = MO_8, + MO_UW = MO_16, + MO_UL = MO_32, + MO_SB = MO_SIGN | MO_8, + MO_SW = MO_SIGN | MO_16, + MO_SL = MO_SIGN | MO_32, + MO_Q = MO_64, + + MO_LEUW = MO_LE | MO_UW, + MO_LEUL = MO_LE | MO_UL, + MO_LESW = MO_LE | MO_SW, + MO_LESL = MO_LE | MO_SL, + MO_LEQ = MO_LE | MO_Q, + + MO_BEUW = MO_BE | MO_UW, + MO_BEUL = MO_BE | MO_UL, + MO_BESW = MO_BE | MO_SW, + MO_BESL = MO_BE | MO_SL, + MO_BEQ = MO_BE | MO_Q, + +#ifdef NEED_CPU_H + MO_TEUW = MO_TE | MO_UW, + MO_TEUL = MO_TE | MO_UL, + MO_TESW = MO_TE | MO_SW, + MO_TESL = MO_TE | MO_SL, + MO_TEQ = MO_TE | MO_Q, +#endif + + MO_SSIZE = MO_SIZE | MO_SIGN, +} MemOp; + +#endif diff --git a/qemu/target/arm/translate-a64.c b/qemu/target/arm/translate-a64.c index 296942b4..12dc19c3 100644 --- a/qemu/target/arm/translate-a64.c +++ b/qemu/target/arm/translate-a64.c @@ -76,7 +76,7 @@ typedef void NeonGenOneOpFn(TCGContext *t, TCGv_i64, TCGv_i64); typedef void CryptoTwoOpFn(TCGContext *, TCGv_ptr, TCGv_ptr); typedef void CryptoThreeOpIntFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void CryptoThreeOpFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr); -typedef void AtomicThreeOpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp); +typedef void AtomicThreeOpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); /* initialize TCG globals. */ void a64_translate_init(struct uc_struct *uc) @@ -589,7 +589,7 @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) * Dn, Sn, Hn or Bn). 
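 * (For example, size == MO_32 gives the offset of the Sn view of Qn.)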
* (Note that this is not the same mapping as for A32; see cpu.h) */ -static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size) +static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size) { return vec_reg_offset(s, regno, 0, size); } @@ -1038,7 +1038,7 @@ static void do_gpr_ld_memidx(DisasContext *s, bool iss_sf, bool iss_ar) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGMemOp memop = s->be_data + size; + MemOp memop = s->be_data + size; g_assert(size <= 3); @@ -1116,7 +1116,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) TCGv_i64 tmphi; if (size < 4) { - TCGMemOp memop = s->be_data + size; + MemOp memop = s->be_data + size; tmphi = tcg_const_i64(tcg_ctx, 0); tcg_gen_qemu_ld_i64(s->uc, tmplo, tcg_addr, get_mem_index(s), memop); } else { @@ -1157,7 +1157,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) /* Get value of an element within a vector register */ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); @@ -1190,7 +1190,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, } static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); @@ -1218,7 +1218,7 @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, /* Set value of an element within a vector register */ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, - int element, TCGMemOp memop) + int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); @@ -1241,7 +1241,7 @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, } static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, - int destidx, int element, TCGMemOp memop) + int destidx, int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); @@ -1262,7 +1262,7 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, /* Store from vector register to memory */ static void do_vec_st(DisasContext *s, int srcidx, int element, - TCGv_i64 tcg_addr, int size, TCGMemOp endian) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); @@ -1275,7 +1275,7 @@ static void do_vec_st(DisasContext *s, int srcidx, int element, /* Load from memory to vector register */ static void do_vec_ld(DisasContext *s, int destidx, int element, - TCGv_i64 tcg_addr, int size, TCGMemOp endian) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); @@ -2391,7 +2391,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, { TCGContext *tcg_ctx = s->uc->tcg_ctx; int idx = get_mem_index(s); - TCGMemOp memop = s->be_data; + MemOp memop = s->be_data; g_assert(size <= 3); if (is_pair) { @@ -3483,7 +3483,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) bool is_postidx = extract32(insn, 23, 1); bool is_q = extract32(insn, 30, 1); TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; - TCGMemOp 
endian = s->be_data; + MemOp endian = s->be_data; int ebytes; /* bytes per element */ int elements; /* elements per vector */ @@ -5691,7 +5691,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) unsigned int mos, type, rm, cond, rn, rd; TCGv_i64 t_true, t_false, t_zero; DisasCompare64 c; - TCGMemOp sz; + MemOp sz; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); @@ -6517,7 +6517,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) int mos = extract32(insn, 29, 3); uint64_t imm; TCGv_i64 tcg_res; - TCGMemOp sz; + MemOp sz; if (mos || imm5) { unallocated_encoding(s); @@ -7289,7 +7289,7 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, if (esize == size) { int element; - TCGMemOp msize = esize == 16 ? MO_16 : MO_32; + MemOp msize = esize == 16 ? MO_16 : MO_32; TCGv_i32 tcg_elem; /* We should have one register left here */ @@ -8293,7 +8293,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, int shift = (2 * esize) - immhb; int elements = is_scalar ? 1 : (64 / esize); bool round = extract32(opcode, 0, 1); - TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); + MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); TCGv_i64 tcg_rn, tcg_rd, tcg_round; TCGv_i32 tcg_rd_narrowed; TCGv_i64 tcg_final; @@ -8453,7 +8453,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } }; NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; - TCGMemOp memop = scalar ? size : MO_32; + MemOp memop = scalar ? size : MO_32; int maxpass = scalar ? 1 : is_q ? 4 : 2; for (pass = 0; pass < maxpass; pass++) { @@ -8498,7 +8498,7 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, TCGv_ptr tcg_fpst = get_fpstatus_ptr(s, size == MO_16); TCGv_i32 tcg_shift = NULL; - TCGMemOp mop = size | (is_signed ? MO_SIGN : 0); + MemOp mop = size | (is_signed ? MO_SIGN : 0); int pass; if (fracbits || size == MO_64) { @@ -10291,7 +10291,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, int dsize = is_q ? 128 : 64; int esize = 8 << size; int elements = dsize/esize; - TCGMemOp memop = size | (is_u ? 0 : MO_SIGN); + MemOp memop = size | (is_u ? 0 : MO_SIGN); TCGv_i64 tcg_rn = new_tmp_a64(s); TCGv_i64 tcg_rd = new_tmp_a64(s); TCGv_i64 tcg_round; @@ -10637,7 +10637,7 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_passres; - TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); + MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); int elt = pass + is_q * 2; @@ -12130,7 +12130,7 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, if (size == 2) { /* 32 + 32 -> 64 op */ - TCGMemOp memop = size + (u ? 0 : MO_SIGN); + MemOp memop = size + (u ? 0 : MO_SIGN); for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); @@ -13157,7 +13157,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) switch (is_fp) { case 1: /* normal fp */ - /* convert insn encoded size to TCGMemOp size */ + /* convert insn encoded size to MemOp size */ switch (size) { case 0: /* half-precision */ size = MO_16; @@ -13205,7 +13205,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) return; } - /* Given TCGMemOp size, adjust register and indexing. */ + /* Given MemOp size, adjust register and indexing. 
*/ switch (size) { case MO_16: index = h << 2 | l << 1 | m; @@ -13502,7 +13502,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) TCGv_i64 tcg_res[2]; int pass; bool satop = extract32(opcode, 0, 1); - TCGMemOp memop = MO_32; + MemOp memop = MO_32; if (satop || !u) { memop |= MO_SIGN; diff --git a/qemu/target/arm/translate-a64.h b/qemu/target/arm/translate-a64.h index fe96d76e..a3bf2439 100644 --- a/qemu/target/arm/translate-a64.h +++ b/qemu/target/arm/translate-a64.h @@ -64,7 +64,7 @@ static inline void assert_fp_access_checked(DisasContext *s) * the FP/vector register Qn. */ static inline int vec_reg_offset(DisasContext *s, int regno, - int element, TCGMemOp size) + int element, MemOp size) { int element_size = 1 << size; int offs = element * element_size; diff --git a/qemu/target/arm/translate-sve.c b/qemu/target/arm/translate-sve.c index 7a299810..01a76f12 100644 --- a/qemu/target/arm/translate-sve.c +++ b/qemu/target/arm/translate-sve.c @@ -4713,7 +4713,7 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) */ /* The memory mode of the dtype. */ -static const TCGMemOp dtype_mop[16] = { +static const MemOp dtype_mop[16] = { MO_UB, MO_UB, MO_UB, MO_UB, MO_SL, MO_UW, MO_UW, MO_UW, MO_SW, MO_SW, MO_UL, MO_UL, diff --git a/qemu/target/arm/translate.c b/qemu/target/arm/translate.c index a71fdad2..cbbbcecd 100644 --- a/qemu/target/arm/translate.c +++ b/qemu/target/arm/translate.c @@ -108,7 +108,7 @@ typedef enum ISSInfo { } ISSInfo; /* Save the syndrome information for a Data Abort */ -static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo) +static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo) { uint32_t syn; int sas = memop & MO_SIZE; @@ -959,7 +959,7 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) * that the address argument is TCGv_i32 rather than TCGv. */ -static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op) +static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr = tcg_temp_new(tcg_ctx); @@ -973,7 +973,7 @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op) } static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr; @@ -989,7 +989,7 @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, } static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr; @@ -1029,7 +1029,7 @@ static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) } static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr = gen_aa32_addr(s, a32, opc); @@ -1045,7 +1045,7 @@ static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, } static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr = gen_aa32_addr(s, a32, opc); @@ -1224,7 +1224,7 @@ neon_reg_offset (int reg, int n) * where 0 is the least significant end of the register. 
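 * (For example, element 1 with size MO_32 is byte offset 4 from the
 * start of the register on a little-endian host.)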
*/ static inline long -neon_element_offset(int reg, int element, TCGMemOp size) +neon_element_offset(int reg, int element, MemOp size) { int element_size = 1 << size; int ofs = element * element_size; @@ -1247,7 +1247,7 @@ static TCGv_i32 neon_load_reg(DisasContext *s, int reg, int pass) return tmp; } -static void neon_load_element(DisasContext *s, TCGv_i32 var, int reg, int ele, TCGMemOp mop) +static void neon_load_element(DisasContext *s, TCGv_i32 var, int reg, int ele, MemOp mop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; long offset = neon_element_offset(reg, ele, mop & MO_SIZE); @@ -1267,7 +1267,7 @@ static void neon_load_element(DisasContext *s, TCGv_i32 var, int reg, int ele, T } } -static void neon_load_element64(DisasContext *s, TCGv_i64 var, int reg, int ele, TCGMemOp mop) +static void neon_load_element64(DisasContext *s, TCGv_i64 var, int reg, int ele, MemOp mop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; long offset = neon_element_offset(reg, ele, mop & MO_SIZE); @@ -1297,7 +1297,7 @@ static void neon_store_reg(DisasContext *s, int reg, int pass, TCGv_i32 var) tcg_temp_free_i32(tcg_ctx, var); } -static void neon_store_element(DisasContext *s, int reg, int ele, TCGMemOp size, TCGv_i32 var) +static void neon_store_element(DisasContext *s, int reg, int ele, MemOp size, TCGv_i32 var) { TCGContext *tcg_ctx = s->uc->tcg_ctx; long offset = neon_element_offset(reg, ele, size); @@ -1317,7 +1317,7 @@ static void neon_store_element(DisasContext *s, int reg, int ele, TCGMemOp size, } } -static void neon_store_element64(DisasContext *s, int reg, int ele, TCGMemOp size, TCGv_i64 var) +static void neon_store_element64(DisasContext *s, int reg, int ele, MemOp size, TCGv_i64 var) { TCGContext *tcg_ctx = s->uc->tcg_ctx; long offset = neon_element_offset(reg, ele, size); @@ -3404,7 +3404,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) int n; int vec_size; int mmu_idx; - TCGMemOp endian; + MemOp endian; TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; @@ -6725,7 +6725,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) } else if ((insn & 0x380) == 0) { /* VDUP */ int element; - TCGMemOp size; + MemOp size; if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { return 1; @@ -7294,7 +7294,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - TCGMemOp opc = size | MO_ALIGN | s->be_data; + MemOp opc = size | MO_ALIGN | s->be_data; s->is_ldex = true; @@ -7350,7 +7350,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv taddr; TCGLabel *done_label; TCGLabel *fail_label; - TCGMemOp opc = size | MO_ALIGN | s->be_data; + MemOp opc = size | MO_ALIGN | s->be_data; /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { [addr] = {Rt}; diff --git a/qemu/target/arm/translate.h b/qemu/target/arm/translate.h index cd1c8941..0d5902d9 100644 --- a/qemu/target/arm/translate.h +++ b/qemu/target/arm/translate.h @@ -22,7 +22,7 @@ typedef struct DisasContext { int condexec_cond; int thumb; int sctlr_b; - TCGMemOp be_data; + MemOp be_data; #if !defined(CONFIG_USER_ONLY) int user; #endif diff --git a/qemu/target/i386/translate.c b/qemu/target/i386/translate.c index 0011d411..1593c84d 100644 --- a/qemu/target/i386/translate.c +++ b/qemu/target/i386/translate.c @@ -76,8 +76,8 @@ typedef struct DisasContext { /* current insn context */ int override; /* -1 if no override */ int prefix; - TCGMemOp aflag; - TCGMemOp dflag; + MemOp aflag; + MemOp dflag; target_ulong 
pc_start; target_ulong pc; /* pc = eip + cs_base */ /* current block context */ @@ -143,7 +143,7 @@ static void gen_eob(DisasContext *s); static void gen_jr(DisasContext *s, TCGv dest); static void gen_jmp(DisasContext *s, target_ulong eip); static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); -static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d); +static void gen_op(DisasContext *s, int op, MemOp ot, int d); /* i386 arith/logic operations */ enum { @@ -391,7 +391,7 @@ static inline bool byte_reg_is_xH(DisasContext *s, int reg) } /* Select the size of a push/pop operation. */ -static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) +static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) { if (CODE64(s)) { return ot == MO_16 ? MO_16 : MO_64; @@ -401,13 +401,13 @@ static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) } /* Select the size of the stack pointer. */ -static inline TCGMemOp mo_stacksize(DisasContext *s) +static inline MemOp mo_stacksize(DisasContext *s) { return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; } /* Select only size 64 else 32. Used for SSE operand sizes. */ -static inline TCGMemOp mo_64_32(TCGMemOp ot) +static inline MemOp mo_64_32(MemOp ot) { #ifdef TARGET_X86_64 return ot == MO_64 ? MO_64 : MO_32; @@ -418,19 +418,19 @@ static inline TCGMemOp mo_64_32(TCGMemOp ot) /* Select size 8 if lsb of B is clear, else OT. Used for decoding byte vs word opcodes. */ -static inline TCGMemOp mo_b_d(int b, TCGMemOp ot) +static inline MemOp mo_b_d(int b, MemOp ot) { return b & 1 ? ot : MO_8; } /* Select size 8 if lsb of B is clear, else OT capped at 32. Used for decoding operand size of port opcodes. */ -static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot) +static inline MemOp mo_b_d32(int b, MemOp ot) { return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; } -static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0) +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -461,7 +461,7 @@ static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0) } } -static inline void gen_op_mov_v_reg(DisasContext *s, TCGMemOp ot, TCGv t0, int reg) +static inline void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -490,7 +490,7 @@ static inline void gen_op_jmp_v(DisasContext *s, TCGv dest) tcg_gen_st_tl(tcg_ctx, dest, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); } -static inline void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val) +static inline void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -499,7 +499,7 @@ static inline void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, in gen_op_mov_reg_v(s, size, reg, s->tmp0); } -static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg) +static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -542,7 +542,7 @@ static inline void gen_jmp_im(DisasContext *s, target_ulong pc) /* Compute SEG:REG into A0. SEG is selected from the override segment (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to indicate no override. 
*/ -static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0, +static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, int def_seg, int ovr_seg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -614,7 +614,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); } -static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot) +static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) { TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -622,7 +622,7 @@ static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot) tcg_gen_shli_tl(tcg_ctx, s->T0, s->T0, ot); }; -static TCGv gen_ext_tl(DisasContext *s, TCGv dst, TCGv src, TCGMemOp size, bool sign) +static TCGv gen_ext_tl(DisasContext *s, TCGv dst, TCGv src, MemOp size, bool sign) { TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -655,17 +655,17 @@ static TCGv gen_ext_tl(DisasContext *s, TCGv dst, TCGv src, TCGMemOp size, bool } } -static void gen_extu(DisasContext *s, TCGMemOp ot, TCGv reg) +static void gen_extu(DisasContext *s, MemOp ot, TCGv reg) { gen_ext_tl(s, reg, reg, ot, false); } -static void gen_exts(DisasContext *s, TCGMemOp ot, TCGv reg) +static void gen_exts(DisasContext *s, MemOp ot, TCGv reg) { gen_ext_tl(s, reg, reg, ot, true); } -static inline void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) +static inline void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -675,7 +675,7 @@ static inline void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *labe tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, s->tmp0, 0, label1); } -static inline void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) +static inline void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -685,7 +685,7 @@ static inline void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, s->tmp0, 0, label1); } -static void gen_helper_in_func(DisasContext *s, TCGMemOp ot, TCGv v, TCGv_i32 n) +static void gen_helper_in_func(DisasContext *s, MemOp ot, TCGv v, TCGv_i32 n) { TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -704,7 +704,7 @@ static void gen_helper_in_func(DisasContext *s, TCGMemOp ot, TCGv v, TCGv_i32 n) } } -static void gen_helper_out_func(DisasContext *s, TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) +static void gen_helper_out_func(DisasContext *s, MemOp ot, TCGv_i32 v, TCGv_i32 n) { TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -723,7 +723,7 @@ static void gen_helper_out_func(DisasContext *s, TCGMemOp ot, TCGv_i32 v, TCGv_i } } -static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, +static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip, uint32_t svm_flags) { target_ulong next_eip; @@ -760,7 +760,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, } } -static inline void gen_movs(DisasContext *s, TCGMemOp ot) +static inline void gen_movs(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); @@ -993,7 +993,7 @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) return ccprepare_make(TCG_COND_NEVER, 0, 0, 0, -1, false, false); default: { - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; TCGv t0 = gen_ext_tl(s, reg, cpu_cc_dst, size, true); return ccprepare_make(TCG_COND_LT, 
t0, 0, 0, -1, false, false); } @@ -1041,7 +1041,7 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) return ccprepare_make(TCG_COND_ALWAYS, 0, 0, 0, -1, false, false); default: { - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; TCGv t0 = gen_ext_tl(s, reg, cpu_cc_dst, size, false); return ccprepare_make(TCG_COND_EQ, t0, 0, 0, -1, false, false); } @@ -1053,7 +1053,7 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) { int inv, jcc_op, cond; - TCGMemOp size; + MemOp size; CCPrepare cc; TCGv t0; TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -1233,7 +1233,7 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) return l2; } -static inline void gen_stos(DisasContext *s, TCGMemOp ot) +static inline void gen_stos(DisasContext *s, MemOp ot) { gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); @@ -1242,7 +1242,7 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot) gen_op_add_reg_T0(s, s->aflag, R_EDI); } -static inline void gen_lods(DisasContext *s, TCGMemOp ot) +static inline void gen_lods(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); @@ -1251,7 +1251,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot) gen_op_add_reg_T0(s, s->aflag, R_ESI); } -static inline void gen_scas(DisasContext *s, TCGMemOp ot) +static inline void gen_scas(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); @@ -1260,7 +1260,7 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot) gen_op_add_reg_T0(s, s->aflag, R_EDI); } -static inline void gen_cmps(DisasContext *s, TCGMemOp ot) +static inline void gen_cmps(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); @@ -1285,7 +1285,7 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) } } -static inline void gen_ins(DisasContext *s, TCGMemOp ot) +static inline void gen_ins(DisasContext *s, MemOp ot) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -1304,7 +1304,7 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot) gen_bpt_io(s, s->tmp2_i32, ot); } -static inline void gen_outs(DisasContext *s, TCGMemOp ot) +static inline void gen_outs(DisasContext *s, MemOp ot) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -1325,7 +1325,7 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) /* same method as Valgrind : we generate jumps to current or next instruction */ #define GEN_REPZ(op) \ -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ target_ulong cur_eip, target_ulong next_eip) \ { \ TCGLabel *l2; \ @@ -1341,7 +1341,7 @@ static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ } #define GEN_REPZ2(op) \ -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ target_ulong cur_eip, \ target_ulong next_eip, \ int nz) \ @@ -1444,7 +1444,7 @@ static void gen_illegal_opcode(DisasContext *s) } /* if d == OR_TMP0, it means memory operand (address in A0) */ -static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) +static void gen_op(DisasContext *s, int op, MemOp ot, int d) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; @@ -1557,7 +1557,7 @@ static void gen_op(DisasContext *s, int op, 
TCGMemOp ot, int d) } /* if d == OR_TMP0, it means memory operand (address in A0) */ -static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) +static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) { TCGContext *tcg_ctx = s1->uc->tcg_ctx; TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; @@ -1587,7 +1587,7 @@ static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); } -static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, +static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, TCGv shm1, TCGv count, bool is_right) { TCGv_i32 z32, s32, oldop; @@ -1636,7 +1636,7 @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, set_cc_op(s, CC_OP_DYNAMIC); } -static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, +static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right, int is_arith) { target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); @@ -1673,7 +1673,7 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); } -static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, +static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, int is_right, int is_arith) { int mask = (ot == MO_64 ? 0x3f : 0x1f); @@ -1716,7 +1716,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, } } -static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) +static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) { target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); TCGv_i32 t0, t1; @@ -1805,7 +1805,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) set_cc_op(s, CC_OP_DYNAMIC); } -static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, +static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, int is_right) { int mask = (ot == MO_64 ? 0x3f : 0x1f); @@ -1886,7 +1886,7 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, } /* XXX: add faster immediate = 1 case */ -static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, +static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) { TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -1944,7 +1944,7 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, } /* XXX: add faster immediate case */ -static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, +static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, bool is_right, TCGv count_in) { target_ulong mask = (ot == MO_64 ? 63 : 31); @@ -2026,7 +2026,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, tcg_temp_free(tcg_ctx, count); } -static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) +static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) { if (s != OR_TMP1) gen_op_mov_v_reg(s1, ot, s1->T1, s); @@ -2056,7 +2056,7 @@ static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) } } -static void gen_shifti(DisasContext *s, int op, TCGMemOp ot, int d, int c) +static void gen_shifti(DisasContext *s, int op, MemOp ot, int d, int c) { TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -2345,7 +2345,7 @@ static void gen_add_A0_ds_seg(DisasContext *s) /* generate modrm memory load or store of 'reg'. 
TMP0 is used if reg == OR_TMP0 */ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, - TCGMemOp ot, int reg, int is_store) + MemOp ot, int reg, int is_store) { int mod, rm; @@ -2375,7 +2375,7 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, } } -static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) +static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) { uint32_t ret; @@ -2398,7 +2398,7 @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) return ret; } -static inline int insn_const_size(TCGMemOp ot) +static inline int insn_const_size(MemOp ot) { if (ot <= MO_32) { return 1 << ot; @@ -2463,7 +2463,7 @@ static inline void gen_jcc(DisasContext *s, int b, } } -static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, +static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, int modrm, int reg) { CCPrepare cc; @@ -2571,8 +2571,8 @@ static inline void gen_stack_update(DisasContext *s, int addend) /* Generate a push. It depends on ss32, addseg and dflag. */ static void gen_push_v(DisasContext *s, TCGv val) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGMemOp a_ot = mo_stacksize(s); + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = mo_stacksize(s); int size = 1 << d_ot; TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv new_esp = s->A0; @@ -2593,9 +2593,9 @@ static void gen_push_v(DisasContext *s, TCGv val) } /* two step pop is necessary for precise exceptions */ -static TCGMemOp gen_pop_T0(DisasContext *s) +static MemOp gen_pop_T0(DisasContext *s) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); + MemOp d_ot = mo_pushpop(s, s->dflag); TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -2605,7 +2605,7 @@ static TCGMemOp gen_pop_T0(DisasContext *s) return d_ot; } -static inline void gen_pop_update(DisasContext *s, TCGMemOp ot) +static inline void gen_pop_update(DisasContext *s, MemOp ot) { gen_stack_update(s, 1 << ot); } @@ -2620,8 +2620,8 @@ static inline void gen_stack_A0(DisasContext *s) static void gen_pusha(DisasContext *s) { - TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16; - TCGMemOp d_ot = s->dflag; + MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -2638,8 +2638,8 @@ static void gen_pusha(DisasContext *s) static void gen_popa(DisasContext *s) { - TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16; - TCGMemOp d_ot = s->dflag; + MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; TCGContext *tcg_ctx = s->uc->tcg_ctx; @@ -2661,8 +2661,8 @@ static void gen_popa(DisasContext *s) static void gen_enter(DisasContext *s, int esp_addend, int level) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? 
MO_32 : MO_16; int size = 1 << d_ot; TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -2703,8 +2703,8 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) static void gen_leave(DisasContext *s) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGMemOp a_ot = mo_stacksize(s); + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = mo_stacksize(s); TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv *cpu_regs = tcg_ctx->cpu_regs; @@ -3488,7 +3488,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, SSEFunc_0_eppi sse_fn_eppi; SSEFunc_0_ppi sse_fn_ppi; SSEFunc_0_eppt sse_fn_eppt; - TCGMemOp ot; + MemOp ot; TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr cpu_env = tcg_ctx->cpu_env; TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; @@ -4942,7 +4942,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) CPUX86State *env = cpu->env_ptr; int b, prefixes; int shift; - TCGMemOp ot, aflag, dflag; + MemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; target_ulong next_eip, tval; int rex_w, rex_r; @@ -6100,8 +6100,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { - TCGMemOp d_ot; - TCGMemOp s_ot; + MemOp d_ot; + MemOp s_ot; /* d_ot is the size of destination */ d_ot = dflag; diff --git a/qemu/target/m68k/translate.c b/qemu/target/m68k/translate.c index c08fb5c7..f4a515ec 100644 --- a/qemu/target/m68k/translate.c +++ b/qemu/target/m68k/translate.c @@ -2486,7 +2486,7 @@ DISAS_INSN(cas) uint16_t ext; TCGv load; TCGv cmp; - TCGMemOp opc; + MemOp opc; switch ((insn >> 9) & 3) { case 1: diff --git a/qemu/target/mips/translate.c b/qemu/target/mips/translate.c index c084d791..9b3b61d9 100644 --- a/qemu/target/mips/translate.c +++ b/qemu/target/mips/translate.c @@ -2495,7 +2495,7 @@ typedef struct DisasContext { int32_t CP0_Config5; /* Routine used to access memory */ int mem_idx; - TCGMemOp default_tcg_memop_mask; + MemOp default_tcg_memop_mask; uint32_t hflags, saved_hflags; target_ulong btarget; bool ulri; @@ -3717,7 +3717,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, /* Store conditional */ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, - TCGMemOp tcg_mo, bool eva) + MemOp tcg_mo, bool eva) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv addr, t0, val; @@ -4592,7 +4592,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) } static inline void gen_r6_ld(DisasContext *ctx, target_long addr, int reg, int memidx, - TCGMemOp memop) + MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_const_tl(tcg_ctx, addr); @@ -21992,8 +21992,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) extract32(ctx->opcode, 0, 8); TCGv va = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); - TCGMemOp memop = (extract32(ctx->opcode, 8, 3)) == - NM_P_LS_UAWM ? MO_UNALN : 0; + MemOp memop = (extract32(ctx->opcode, 8, 3)) == + NM_P_LS_UAWM ? MO_UNALN : 0; count = (count == 0) ? 8 : count; while (counter != count) { diff --git a/qemu/target/riscv/insn_trans/trans_rva.inc.c b/qemu/target/riscv/insn_trans/trans_rva.inc.c index eaed16a1..8d218fbe 100644 --- a/qemu/target/riscv/insn_trans/trans_rva.inc.c +++ b/qemu/target/riscv/insn_trans/trans_rva.inc.c @@ -18,7 +18,7 @@ * this program. If not, see . 
*/ -static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) +static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv src1 = tcg_temp_new(tcg_ctx); @@ -38,7 +38,7 @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) return true; } -static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) +static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv src1 = tcg_temp_new(tcg_ctx); @@ -84,8 +84,8 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) } static bool gen_amo(DisasContext *ctx, arg_atomic *a, - void(*func)(TCGContext *, TCGv, TCGv, TCGv, TCGArg, TCGMemOp), - TCGMemOp mop) + void(*func)(TCGContext *, TCGv, TCGv, TCGv, TCGArg, MemOp), + MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv src1 = tcg_temp_new(tcg_ctx); diff --git a/qemu/target/riscv/insn_trans/trans_rvi.inc.c b/qemu/target/riscv/insn_trans/trans_rvi.inc.c index 26491142..31e9ab35 100644 --- a/qemu/target/riscv/insn_trans/trans_rvi.inc.c +++ b/qemu/target/riscv/insn_trans/trans_rvi.inc.c @@ -140,7 +140,7 @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a) return gen_branch(ctx, a, TCG_COND_GEU); } -static bool gen_load(DisasContext *ctx, arg_lb *a, TCGMemOp memop) +static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; @@ -181,7 +181,7 @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a) return gen_load(ctx, a, MO_TEUW); } -static bool gen_store(DisasContext *ctx, arg_sb *a, TCGMemOp memop) +static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; diff --git a/qemu/target/sparc/translate.c b/qemu/target/sparc/translate.c index 0a4f2242..eeaa74a2 100644 --- a/qemu/target/sparc/translate.c +++ b/qemu/target/sparc/translate.c @@ -2165,7 +2165,7 @@ static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, } static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, - TCGv addr, int mmu_idx, TCGMemOp memop) + TCGv addr, int mmu_idx, MemOp memop) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; gen_address_mask(dc, addr); @@ -2199,10 +2199,10 @@ typedef struct { ASIType type; int asi; int mem_idx; - TCGMemOp memop; + MemOp memop; } DisasASI; -static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) +static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) { int asi = GET_FIELD(insn, 19, 26); ASIType type = GET_ASI_HELPER; @@ -2421,7 +2421,7 @@ static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) } static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, - int insn, TCGMemOp memop) + int insn, MemOp memop) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, memop); @@ -2460,7 +2460,7 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, } static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, - int insn, TCGMemOp memop) + int insn, MemOp memop) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, memop); @@ -2670,7 +2670,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for lddfa on aligned registers only. */ if (size == 8 && (rd & 7) == 0) { - TCGMemOp memop; + MemOp memop; TCGv eight; int i; @@ -2785,7 +2785,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for stdfa on aligned registers only. 
*/ if (size == 8 && (rd & 7) == 0) { - TCGMemOp memop; + MemOp memop; TCGv eight; int i; diff --git a/qemu/tcg/README b/qemu/tcg/README index 1c4138fa..45ddde05 100644 --- a/qemu/tcg/README +++ b/qemu/tcg/README @@ -507,7 +507,7 @@ Both t0 and t1 may be split into little-endian ordered pairs of registers if dealing with 64-bit quantities on a 32-bit host. The memidx selects the qemu tlb index to use (e.g. user or kernel access). -The flags are the TCGMemOp bits, selecting the sign, width, and endianness +The flags are the MemOp bits, selecting the sign, width, and endianness of the memory access. For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a diff --git a/qemu/tcg/aarch64/tcg-target.inc.c b/qemu/tcg/aarch64/tcg-target.inc.c index f7c7c1d7..9f747416 100644 --- a/qemu/tcg/aarch64/tcg-target.inc.c +++ b/qemu/tcg/aarch64/tcg-target.inc.c @@ -1420,7 +1420,7 @@ static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); } -static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ @@ -1428,7 +1428,7 @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, tcg_out_sbfm(s, ext, rd, rn, 0, bits); } -static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, +static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ @@ -1577,8 +1577,8 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp size = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1602,8 +1602,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp size = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1638,7 +1638,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, slow path for the failure case, which will be patched later when finalizing the slow path. Generated code returns the host addend in X1, clobbers X0,X2,X3,TMP. 
*/ -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, tcg_insn_unit **label_ptr, int mem_index, bool is_read) { @@ -1711,11 +1711,11 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, #endif /* CONFIG_SOFTMMU */ -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - const TCGMemOp bswap = memop & MO_BSWAP; + const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SSIZE) { case MO_UB: @@ -1767,11 +1767,11 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, } } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - const TCGMemOp bswap = memop & MO_BSWAP; + const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SIZE) { case MO_8: @@ -1806,7 +1806,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi, TCGType ext) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); @@ -1831,7 +1831,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); diff --git a/qemu/tcg/arm/tcg-target.inc.c b/qemu/tcg/arm/tcg-target.inc.c index 9c149f9d..ac1f7fb8 100644 --- a/qemu/tcg/arm/tcg-target.inc.c +++ b/qemu/tcg/arm/tcg-target.inc.c @@ -1221,7 +1221,7 @@ QEMU_BUILD_BUG_ON(CPU_TLB_BITS > 8); containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. 
*/ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - TCGMemOp opc, int mem_index, bool is_load) + MemOp opc, int mem_index, bool is_load) { TCGReg base = TCG_AREG0; int cmp_off = @@ -1362,7 +1362,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); void *func; if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { @@ -1426,7 +1426,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1467,11 +1467,11 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } #endif /* SOFTMMU */ -static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1528,11 +1528,11 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, } } -static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1591,7 +1591,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; @@ -1628,11 +1628,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) #endif } -static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, +static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1673,11 +1673,11 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, } } -static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1722,7 +1722,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; diff --git a/qemu/tcg/i386/tcg-target.inc.c b/qemu/tcg/i386/tcg-target.inc.c index 57da3277..8e5311ac 100644 --- a/qemu/tcg/i386/tcg-target.inc.c +++ b/qemu/tcg/i386/tcg-target.inc.c @@ -1683,7 +1683,7 @@ static void * const qemu_st_helpers[16] = { First argument register is clobbered. 
*/ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - int mem_index, TCGMemOp opc, + int mem_index, MemOp opc, tcg_insn_unit **label_ptr, int which) { const TCGReg r0 = TCG_REG_L0; @@ -1798,7 +1798,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg data_reg; tcg_insn_unit **label_ptr = &l->label_ptr[0]; int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0); @@ -1883,8 +1883,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; tcg_insn_unit **label_ptr = &l->label_ptr[0]; TCGReg retaddr; @@ -1963,10 +1963,10 @@ static inline void setup_guest_base_seg(TCGContext *s) { } static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, - int seg, bool is64, TCGMemOp memop) + int seg, bool is64, MemOp memop) { - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; int rexw = is64 * P_REXW; int movop = OPC_MOVL_GvEv; @@ -2071,7 +2071,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) TCGReg datalo, datahi, addrlo; TCGReg addrhi QEMU_UNUSED_VAR; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; @@ -2123,15 +2123,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, - int seg, TCGMemOp memop) + int seg, MemOp memop) { /* ??? Ideally we wouldn't need a scratch register. For user-only, we could perform the bswap twice to restore the original value instead of moving to the scratch. But as it is, the L constraint means that TCG_REG_L0 is definitely free here. 
*/ const TCGReg scratch = TCG_REG_L0; - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; int movop = OPC_MOVL_EvGv; if (s->have_movbe && real_bswap) { @@ -2207,7 +2207,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) TCGReg datalo, datahi, addrlo; TCGReg addrhi QEMU_UNUSED_VAR; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; diff --git a/qemu/tcg/mips/tcg-target.inc.c b/qemu/tcg/mips/tcg-target.inc.c index 8967f222..6b6c092f 100644 --- a/qemu/tcg/mips/tcg-target.inc.c +++ b/qemu/tcg/mips/tcg-target.inc.c @@ -1164,7 +1164,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, TCGReg addrh, TCGMemOpIdx oi, tcg_insn_unit *label_ptr[2], bool is_load) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); target_ulong mask; @@ -1266,7 +1266,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg v0; int i; @@ -1315,8 +1315,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; int i; /* resolve label address */ @@ -1365,7 +1365,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) #endif static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc, bool is_64) + TCGReg base, MemOp opc, bool is_64) { switch (opc & (MO_SSIZE | MO_BSWAP)) { case MO_UB: @@ -1472,7 +1472,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR; TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif @@ -1509,7 +1509,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc) + TCGReg base, MemOp opc) { /* Don't clutter the code below with checks to avoid bswapping ZERO. 
*/ if ((lo | hi) == 0) { @@ -1624,7 +1624,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR; TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif diff --git a/qemu/tcg/optimize.c b/qemu/tcg/optimize.c index 8ae4922f..f4734424 100644 --- a/qemu/tcg/optimize.c +++ b/qemu/tcg/optimize.c @@ -1014,7 +1014,7 @@ void tcg_optimize(TCGContext *s) CASE_OP_32_64(qemu_ld): { TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; - TCGMemOp mop = get_memop(oi); + MemOp mop = get_memop(oi); if (!(mop & MO_SIGN)) { mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; } diff --git a/qemu/tcg/ppc/tcg-target.inc.c b/qemu/tcg/ppc/tcg-target.inc.c index 6e203670..411f8006 100644 --- a/qemu/tcg/ppc/tcg-target.inc.c +++ b/qemu/tcg/ppc/tcg-target.inc.c @@ -1500,7 +1500,7 @@ static void * const qemu_st_helpers[16] = { in CR7, loads the addend of the TLB into R3, and returns the register containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc, +static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, TCGReg addrlo, TCGReg addrhi, int mem_index, bool is_read) { @@ -1638,7 +1638,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { @@ -1685,8 +1685,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { @@ -1749,7 +1749,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc, s_bits; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; @@ -1824,7 +1824,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc, s_bits; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; diff --git a/qemu/tcg/s390/tcg-target.inc.c b/qemu/tcg/s390/tcg-target.inc.c index 16cc8125..672ae02f 100644 --- a/qemu/tcg/s390/tcg-target.inc.c +++ b/qemu/tcg/s390/tcg-target.inc.c @@ -1435,7 +1435,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) } } -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SSIZE | MO_BSWAP)) { @@ -1494,7 +1494,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, } } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SIZE | MO_BSWAP)) { @@ -1551,7 +1551,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 
1][1])
 /* Load and compare a TLB entry, leaving the flags set. Loads the TLB addend into R2. Returns a register with the sanitized guest address. */ -static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc, +static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, MemOp opc, int mem_index, bool is_ld) { int s_mask = (1 << (opc & MO_SIZE)) - 1; @@ -1628,7 +1628,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { @@ -1653,7 +1653,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { @@ -1708,7 +1708,7 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; @@ -1735,7 +1735,7 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; diff --git a/qemu/tcg/sparc/tcg-target.inc.c b/qemu/tcg/sparc/tcg-target.inc.c index 04919676..d6568c63 100644 --- a/qemu/tcg/sparc/tcg-target.inc.c +++ b/qemu/tcg/sparc/tcg-target.inc.c @@ -1079,7 +1079,7 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) is in the returned register, maybe %o0. The TLB addend is in %o1. */ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, - TCGMemOp opc, int which) + MemOp opc, int which) { const TCGReg r0 = TCG_REG_O0; const TCGReg r1 = TCG_REG_O1; @@ -1169,7 +1169,7 @@ static const int qemu_st_opc[16] = { static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi, bool is_64) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; @@ -1251,7 +1251,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; diff --git a/qemu/tcg/tcg-op.c b/qemu/tcg/tcg-op.c index 70b6206e..f8789958 100644 --- a/qemu/tcg/tcg-op.c +++ b/qemu/tcg/tcg-op.c @@ -2735,7 +2735,7 @@ void tcg_gen_lookup_and_goto_ptr(TCGContext *s) } } -static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { /* Trigger the asserts within as early as possible. 
*/ (void)get_alignment_bits(op); @@ -2764,7 +2764,7 @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) } static void gen_ldst_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 val, TCGv addr, - TCGMemOp memop, TCGArg idx) + MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 @@ -2779,7 +2779,7 @@ static void gen_ldst_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 val, TCGv addr, } static void gen_ldst_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 val, TCGv addr, - TCGMemOp memop, TCGArg idx) + MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 @@ -2824,9 +2824,9 @@ static void tcg_gen_req_mo(TCGContext *s, TCGBar type) } } -void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { - TCGMemOp orig_memop; + MemOp orig_memop; TCGContext *tcg_ctx = uc->tcg_ctx; tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD); @@ -2862,7 +2862,7 @@ void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg i check_exit_request(tcg_ctx); } -void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; TCGContext *tcg_ctx = uc->tcg_ctx; @@ -2897,10 +2897,10 @@ void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg i check_exit_request(tcg_ctx); } -void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGContext *tcg_ctx = uc->tcg_ctx; - TCGMemOp orig_memop; + MemOp orig_memop; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(uc, TCGV_LOW(tcg_ctx, val), addr, idx, memop); @@ -2953,7 +2953,7 @@ void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg i check_exit_request(tcg_ctx); } -void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGContext *tcg_ctx = uc->tcg_ctx; TCGv_i64 swap = NULL; @@ -2997,7 +2997,7 @@ void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg i check_exit_request(tcg_ctx); } -static void tcg_gen_ext_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) +static void tcg_gen_ext_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: @@ -3018,7 +3018,7 @@ static void tcg_gen_ext_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 val, TCGMemOp } } -static void tcg_gen_ext_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc) +static void tcg_gen_ext_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: @@ -3083,7 +3083,7 @@ static void * const table_cmpxchg[16] = { void tcg_gen_atomic_cmpxchg_i32(TCGContext *s, TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, TCGMemOp memop) + TCGv_i32 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 0, 0); @@ -3128,7 +3128,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGContext *s, void tcg_gen_atomic_cmpxchg_i64(TCGContext *s, TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, TCGMemOp memop) + 
TCGv_i64 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 1, 0); @@ -3193,7 +3193,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGContext *s, static void do_nonatomic_op_i32(TCGContext *s, TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, TCGMemOp memop, bool new_val, + TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t1 = tcg_temp_new_i32(s); @@ -3212,7 +3212,7 @@ static void do_nonatomic_op_i32(TCGContext *s, static void do_atomic_op_i32(TCGContext *s, TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, TCGMemOp memop, void * const table[]) + TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; @@ -3238,7 +3238,7 @@ static void do_atomic_op_i32(TCGContext *s, static void do_nonatomic_op_i64(TCGContext *s, TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, TCGMemOp memop, bool new_val, + TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 t1 = tcg_temp_new_i64(s); @@ -3257,7 +3257,7 @@ static void do_nonatomic_op_i64(TCGContext *s, static void do_atomic_op_i64(TCGContext *s, TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, TCGMemOp memop, void * const table[]) + TCGArg idx, MemOp memop, void * const table[]) { memop = tcg_canonicalize_memop(memop, 1, 0); @@ -3311,7 +3311,7 @@ static void * const table_##NAME[16] = { \ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ }; \ void tcg_gen_atomic_##NAME##_i32 \ - (TCGContext *s, TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \ + (TCGContext *s, TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ { \ if (s->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i32(s, ret, addr, val, idx, memop, table_##NAME); \ @@ -3321,7 +3321,7 @@ void tcg_gen_atomic_##NAME##_i32 \ } \ } \ void tcg_gen_atomic_##NAME##_i64 \ - (TCGContext *s, TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \ + (TCGContext *s, TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ { \ if (s->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i64(s, ret, addr, val, idx, memop, table_##NAME); \ diff --git a/qemu/tcg/tcg-op.h b/qemu/tcg/tcg-op.h index da7137a2..15c0d4af 100644 --- a/qemu/tcg/tcg-op.h +++ b/qemu/tcg/tcg-op.h @@ -861,10 +861,10 @@ void tcg_gen_lookup_and_goto_ptr(TCGContext *s); #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 #endif -void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, TCGMemOp); +void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, MemOp); static inline void tcg_gen_qemu_ld8u(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) { @@ -924,46 +924,46 @@ static inline void tcg_gen_qemu_st64(struct uc_struct *uc, TCGv_i64 arg, TCGv ad void check_exit_request(TCGContext *tcg_ctx); void tcg_gen_atomic_cmpxchg_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGv_i32, - TCGArg, TCGMemOp); + TCGArg, MemOp); void tcg_gen_atomic_cmpxchg_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGv_i64, - TCGArg, TCGMemOp); + TCGArg, MemOp); -void 
tcg_gen_atomic_xchg_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_xchg_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); +void tcg_gen_atomic_xchg_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xchg_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_add_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_add_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_and_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_and_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_or_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_or_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_xor_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_xor_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smin_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smin_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umin_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umin_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smax_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smax_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umax_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umax_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); +void tcg_gen_atomic_fetch_add_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_add_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_add_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_add_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_and_fetch_i32(TCGContext *, 
TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_and_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_or_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_or_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_xor_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_xor_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_smin_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_smin_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_umin_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_umin_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_smax_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_smax_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_umax_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_umax_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); +void tcg_gen_atomic_add_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_add_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_or_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_or_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_umax_fetch_i32(TCGContext *, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umax_fetch_i64(TCGContext *, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_mov_vec(TCGContext *, TCGv_vec, TCGv_vec); void tcg_gen_dup_i32_vec(TCGContext *, unsigned vece, TCGv_vec, TCGv_i32); diff --git a/qemu/tcg/tcg.c b/qemu/tcg/tcg.c index d66eafff..135e66d4 100644 --- a/qemu/tcg/tcg.c +++ b/qemu/tcg/tcg.c @@ -1487,7 +1487,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) case INDEX_op_qemu_st_i64: { TCGMemOpIdx oi = op->args[k++]; - TCGMemOp op = get_memop(oi); + MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h index ece329de..b4cf228d 100644 --- a/qemu/tcg/tcg.h +++ b/qemu/tcg/tcg.h @@ -27,6 +27,7 @@ #include "qemu-common.h" #include "cpu.h" +#include "exec/memop.h" #include "exec/tb-context.h" #include "qemu/bitops.h" #include "qemu/queue.h" @@ -317,103 
+318,13 @@ typedef enum TCGType { #endif } TCGType; -/* Constants for qemu_ld and qemu_st for the Memory Operation field. */ -typedef enum TCGMemOp { - MO_8 = 0, - MO_16 = 1, - MO_32 = 2, - MO_64 = 3, - MO_SIZE = 3, /* Mask for the above. */ - - MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ - - MO_BSWAP = 8, /* Host reverse endian. */ -#ifdef HOST_WORDS_BIGENDIAN - MO_LE = MO_BSWAP, - MO_BE = 0, -#else - MO_LE = 0, - MO_BE = MO_BSWAP, -#endif -#ifdef TARGET_WORDS_BIGENDIAN - MO_TE = MO_BE, -#else - MO_TE = MO_LE, -#endif - - /* - * MO_UNALN accesses are never checked for alignment. - * MO_ALIGN accesses will result in a call to the CPU's - * do_unaligned_access hook if the guest address is not aligned. - * The default depends on whether the target CPU defines - * TARGET_ALIGNED_ONLY. - * - * Some architectures (e.g. ARMv8) need the address which is aligned - * to a size more than the size of the memory access. - * Some architectures (e.g. SPARCv9) need an address which is aligned, - * but less strictly than the natural alignment. - * - * MO_ALIGN supposes the alignment size is the size of a memory access. - * - * There are three options: - * - unaligned access permitted (MO_UNALN). - * - an alignment to the size of an access (MO_ALIGN); - * - an alignment to a specified size, which may be more or less than - * the access size (MO_ALIGN_x where 'x' is a size in bytes); - */ - MO_ASHIFT = 4, - MO_AMASK = 7 << MO_ASHIFT, -#ifdef TARGET_ALIGNED_ONLY - MO_ALIGN = 0, - MO_UNALN = MO_AMASK, -#else - MO_ALIGN = MO_AMASK, - MO_UNALN = 0, -#endif - MO_ALIGN_2 = 1 << MO_ASHIFT, - MO_ALIGN_4 = 2 << MO_ASHIFT, - MO_ALIGN_8 = 3 << MO_ASHIFT, - MO_ALIGN_16 = 4 << MO_ASHIFT, - MO_ALIGN_32 = 5 << MO_ASHIFT, - MO_ALIGN_64 = 6 << MO_ASHIFT, - - /* Combinations of the above, for ease of use. */ - MO_UB = MO_8, - MO_UW = MO_16, - MO_UL = MO_32, - MO_SB = MO_SIGN | MO_8, - MO_SW = MO_SIGN | MO_16, - MO_SL = MO_SIGN | MO_32, - MO_Q = MO_64, - - MO_LEUW = MO_LE | MO_UW, - MO_LEUL = MO_LE | MO_UL, - MO_LESW = MO_LE | MO_SW, - MO_LESL = MO_LE | MO_SL, - MO_LEQ = MO_LE | MO_Q, - - MO_BEUW = MO_BE | MO_UW, - MO_BEUL = MO_BE | MO_UL, - MO_BESW = MO_BE | MO_SW, - MO_BESL = MO_BE | MO_SL, - MO_BEQ = MO_BE | MO_Q, - - MO_TEUW = MO_TE | MO_UW, - MO_TEUL = MO_TE | MO_UL, - MO_TESW = MO_TE | MO_SW, - MO_TESL = MO_TE | MO_SL, - MO_TEQ = MO_TE | MO_Q, - - MO_SSIZE = MO_SIZE | MO_SIGN, -} TCGMemOp; - /** * get_alignment_bits - * @memop: TCGMemOp value + * @memop: MemOp value * * Extract the alignment size from the memop. */ -static inline unsigned get_alignment_bits(TCGMemOp memop) +static inline unsigned get_alignment_bits(MemOp memop) { unsigned a = memop & MO_AMASK; @@ -1314,7 +1225,7 @@ static inline size_t tcg_current_code_size(TCGContext *s) return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); } -/* Combine the TCGMemOp and mmu_idx parameters into a single value. */ +/* Combine the MemOp and mmu_idx parameters into a single value. */ typedef uint32_t TCGMemOpIdx; /** @@ -1324,7 +1235,7 @@ typedef uint32_t TCGMemOpIdx; * * Encode these values into a single parameter. */ -static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) +static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx) { tcg_debug_assert(idx <= 15); return (op << 4) | idx; @@ -1336,7 +1247,7 @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) * * Extract the memory operation from the combined value. 
*/ -static inline TCGMemOp get_memop(TCGMemOpIdx oi) +static inline MemOp get_memop(TCGMemOpIdx oi) { return oi >> 4; }
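
A note on the tcg/optimize.c hunk above: for a qemu_ld that does not sign-extend (MO_SIGN clear), the optimizer records which result bits can be nonzero. The mask is written as (2ULL << (bits - 1)) - 1 rather than (1ULL << bits) - 1 so that a 64-bit load does not shift by 64, which would be undefined. A minimal standalone check of that arithmetic, not the optimizer code itself, with the MO_* size values copied from memop.h:

    #include <assert.h>
    #include <stdint.h>

    /* Subset of the MemOp size field from include/exec/memop.h. */
    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, MO_SIZE = 3 };

    /* Known-nonzero-bits mask for a zero-extending qemu_ld result. */
    static uint64_t qemu_ld_mask(unsigned mop)
    {
        /* 8 << size_log2 is the loaded width in bits; 2ULL << (bits - 1)
         * sidesteps the undefined 64-bit shift when bits == 64. */
        return (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
    }

    int main(void)
    {
        assert(qemu_ld_mask(MO_8)  == 0xffull);
        assert(qemu_ld_mask(MO_16) == 0xffffull);
        assert(qemu_ld_mask(MO_32) == 0xffffffffull);
        assert(qemu_ld_mask(MO_64) == ~0ull);  /* every bit may be set */
        return 0;
    }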
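
The MO_BSWAP bit in MemOp means "host reverse endian", so MO_LE and MO_BE resolve at compile time to either 0 or MO_BSWAP, and MO_TE picks one of them per target. Whether an access needs a byte swap then reduces to a single bit test, which is what lets the two swaps along the I/O path be collapsed. A sketch assuming a little-endian host (HOST_WORDS_BIGENDIAN undefined):

    #include <assert.h>

    /* Values as they resolve on a little-endian host. */
    enum { MO_16 = 1, MO_BSWAP = 8, MO_LE = 0, MO_BE = MO_BSWAP };

    int main(void)
    {
        /* A big-endian 16-bit access needs a swap on this host... */
        assert(((MO_BE | MO_16) & MO_BSWAP) != 0);
        /* ...while a little-endian one is free. */
        assert(((MO_LE | MO_16) & MO_BSWAP) == 0);
        return 0;
    }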
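
The MO_ALIGN_x values store log2 of an explicitly requested alignment in the three MO_AMASK bits, offset by MO_ASHIFT. The get_alignment_bits() body is cut off in the hunk above, so the sketch below covers only the explicit-alignment case and ignores the MO_ALIGN/MO_UNALN defaults:

    #include <assert.h>

    enum {
        MO_ASHIFT   = 4,
        MO_AMASK    = 7 << MO_ASHIFT,
        MO_ALIGN_2  = 1 << MO_ASHIFT,
        MO_ALIGN_16 = 4 << MO_ASHIFT,
        MO_ALIGN_64 = 6 << MO_ASHIFT,
    };

    /* log2 of an explicitly requested alignment; the byte alignment is
     * 1 << result.  (Assumes memop carries a MO_ALIGN_x value.) */
    static unsigned explicit_align_bits(unsigned memop)
    {
        return (memop & MO_AMASK) >> MO_ASHIFT;
    }

    int main(void)
    {
        assert(explicit_align_bits(MO_ALIGN_2)  == 1);  /*  2-byte */
        assert(explicit_align_bits(MO_ALIGN_16) == 4);  /* 16-byte */
        assert(explicit_align_bits(MO_ALIGN_64) == 6);  /* 64-byte */
        return 0;
    }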
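
Finally, the TCGMemOpIdx helpers above pack a MemOp and an mmu_idx into one 32-bit value: the low four bits hold the index (hence the idx <= 15 assertion) and the remaining bits hold the MemOp. get_mmuidx() is not shown in this hunk; the oi & 15 below is the extraction implied by the (op << 4) | idx encoding:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t TCGMemOpIdx;

    static TCGMemOpIdx make_memop_idx(unsigned op, unsigned idx)
    {
        assert(idx <= 15);            /* only four bits for the mmu index */
        return (op << 4) | idx;
    }

    static unsigned get_memop(TCGMemOpIdx oi)  { return oi >> 4; }
    static unsigned get_mmuidx(TCGMemOpIdx oi) { return oi & 15; }

    int main(void)
    {
        unsigned op = 0x2b;           /* an arbitrary MemOp bit pattern */
        TCGMemOpIdx oi = make_memop_idx(op, 3);
        assert(get_memop(oi) == op && get_mmuidx(oi) == 3);
        return 0;
    }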