From 340f97bf4c87807db1daec8c173ca169ab2a3d71 Mon Sep 17 00:00:00 2001
From: Richard Henderson <richard.henderson@linaro.org>
Date: Fri, 15 May 2020 20:55:34 -0400
Subject: [PATCH] target/arm: Create gen_gvec_{ceq,clt,cle,cgt,cge}0

Provide a functional interface for the vector expansion.
This fits better with the existing set of helpers that we
provide for other operations.

Macro-ize the 5 nearly identical comparisons.

Backports commit 69d5e2bf8c3cefedbfa1c1670137e636dbd7faa5 from qemu
---
 qemu/aarch64.h                  |  10 +-
 qemu/aarch64eb.h                |  10 +-
 qemu/arm.h                      |  10 +-
 qemu/armeb.h                    |  10 +-
 qemu/header_gen.py              |  20 +--
 qemu/target/arm/translate-a64.c |  24 ++-
 qemu/target/arm/translate.c     | 254 +++++++-------------------------
 qemu/target/arm/translate.h     |  16 +-
 8 files changed, 104 insertions(+), 250 deletions(-)

diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index 071168d0..8e79a281 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -3410,11 +3410,6 @@
 #define bif_op bif_op_aarch64
 #define bit_op bit_op_aarch64
 #define bsl_op bsl_op_aarch64
-#define ceq0_op ceq0_op_aarch64
-#define cge0_op cge0_op_aarch64
-#define cgt0_op cgt0_op_aarch64
-#define cle0_op cle0_op_aarch64
-#define clt0_op clt0_op_aarch64
 #define cmtst_op cmtst_op_aarch64
 #define cpu_mmu_index cpu_mmu_index_aarch64
 #define cpu_reg cpu_reg_aarch64
@@ -3423,6 +3418,11 @@
 #define fp_exception_el fp_exception_el_aarch64
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64
+#define gen_gvec_ceq0 gen_gvec_ceq0_aarch64
+#define gen_gvec_cge0 gen_gvec_cge0_aarch64
+#define gen_gvec_cgt0 gen_gvec_cgt0_aarch64
+#define gen_gvec_cle0 gen_gvec_cle0_aarch64
+#define gen_gvec_clt0 gen_gvec_clt0_aarch64
 #define gen_gvec_sli gen_gvec_sli_aarch64
 #define gen_gvec_ssra gen_gvec_ssra_aarch64
 #define gen_gvec_sri gen_gvec_sri_aarch64
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index 1c3dedad..66e5779a 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -3410,11 +3410,6 @@
 #define bif_op bif_op_aarch64eb
 #define bit_op bit_op_aarch64eb
 #define bsl_op bsl_op_aarch64eb
-#define ceq0_op ceq0_op_aarch64eb
-#define cge0_op cge0_op_aarch64eb
-#define cgt0_op cgt0_op_aarch64eb
-#define cle0_op cle0_op_aarch64eb
-#define clt0_op clt0_op_aarch64eb
 #define cmtst_op cmtst_op_aarch64eb
 #define cpu_mmu_index cpu_mmu_index_aarch64eb
 #define cpu_reg cpu_reg_aarch64eb
@@ -3423,6 +3418,11 @@
 #define fp_exception_el fp_exception_el_aarch64eb
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64eb
+#define gen_gvec_ceq0 gen_gvec_ceq0_aarch64eb
+#define gen_gvec_cge0 gen_gvec_cge0_aarch64eb
+#define gen_gvec_cgt0 gen_gvec_cgt0_aarch64eb
+#define gen_gvec_cle0 gen_gvec_cle0_aarch64eb
+#define gen_gvec_clt0 gen_gvec_clt0_aarch64eb
 #define gen_gvec_sli gen_gvec_sli_aarch64eb
 #define gen_gvec_ssra gen_gvec_ssra_aarch64eb
 #define gen_gvec_sri gen_gvec_sri_aarch64eb
diff --git a/qemu/arm.h b/qemu/arm.h
index 71d41077..7aa1753d 100644
--- a/qemu/arm.h
+++ b/qemu/arm.h
@@ -3399,15 +3399,15 @@
 #define arm_set_cpu_off arm_set_cpu_off_arm
 #define arm_set_cpu_on arm_set_cpu_on_arm
 #define arm_stage1_mmu_idx arm_stage1_mmu_idx_arm
-#define ceq0_op ceq0_op_arm
-#define cge0_op cge0_op_arm
-#define cgt0_op cgt0_op_arm
-#define cle0_op cle0_op_arm
-#define clt0_op clt0_op_arm
 #define cmtst_op cmtst_op_arm
 #define cpu_mmu_index cpu_mmu_index_arm
 #define fp_exception_el fp_exception_el_arm
 #define gen_cmtst_i64 gen_cmtst_i64_arm
+#define gen_gvec_ceq0 gen_gvec_ceq0_arm
+#define gen_gvec_cge0 gen_gvec_cge0_arm
+#define gen_gvec_cgt0 gen_gvec_cgt0_arm
+#define gen_gvec_cle0 gen_gvec_cle0_arm
+#define gen_gvec_clt0 gen_gvec_clt0_arm
 #define gen_gvec_sli gen_gvec_sli_arm
 #define gen_gvec_ssra gen_gvec_ssra_arm
 #define gen_gvec_sri gen_gvec_sri_arm
diff --git a/qemu/armeb.h b/qemu/armeb.h
index 4ad4b766..7f2835ca 100644
--- a/qemu/armeb.h
+++ b/qemu/armeb.h
@@ -3399,15 +3399,15 @@
 #define arm_set_cpu_off arm_set_cpu_off_armeb
 #define arm_set_cpu_on arm_set_cpu_on_armeb
 #define arm_stage1_mmu_idx arm_stage1_mmu_idx_armeb
-#define ceq0_op ceq0_op_armeb
-#define cge0_op cge0_op_armeb
-#define cgt0_op cgt0_op_armeb
-#define cle0_op cle0_op_armeb
-#define clt0_op clt0_op_armeb
 #define cmtst_op cmtst_op_armeb
 #define cpu_mmu_index cpu_mmu_index_armeb
 #define fp_exception_el fp_exception_el_armeb
 #define gen_cmtst_i64 gen_cmtst_i64_armeb
+#define gen_gvec_ceq0 gen_gvec_ceq0_armeb
+#define gen_gvec_cge0 gen_gvec_cge0_armeb
+#define gen_gvec_cgt0 gen_gvec_cgt0_armeb
+#define gen_gvec_cle0 gen_gvec_cle0_armeb
+#define gen_gvec_clt0 gen_gvec_clt0_armeb
 #define gen_gvec_sli gen_gvec_sli_armeb
 #define gen_gvec_ssra gen_gvec_ssra_armeb
 #define gen_gvec_sri gen_gvec_sri_armeb
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 23ba5ee2..bc8369b4 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -3408,15 +3408,15 @@ arm_symbols = (
     'arm_set_cpu_off',
     'arm_set_cpu_on',
     'arm_stage1_mmu_idx',
-    'ceq0_op',
-    'cge0_op',
-    'cgt0_op',
-    'cle0_op',
-    'clt0_op',
     'cmtst_op',
     'cpu_mmu_index',
     'fp_exception_el',
     'gen_cmtst_i64',
+    'gen_gvec_ceq0',
+    'gen_gvec_cge0',
+    'gen_gvec_cgt0',
+    'gen_gvec_cle0',
+    'gen_gvec_clt0',
     'gen_gvec_sli',
     'gen_gvec_ssra',
     'gen_gvec_sri',
@@ -3517,11 +3517,6 @@ aarch64_symbols = (
     'bif_op',
     'bit_op',
     'bsl_op',
-    'ceq0_op',
-    'cge0_op',
-    'cgt0_op',
-    'cle0_op',
-    'clt0_op',
     'cmtst_op',
     'cpu_mmu_index',
     'cpu_reg',
@@ -3530,6 +3525,11 @@ aarch64_symbols = (
     'fp_exception_el',
     'gen_a64_set_pc_im',
     'gen_cmtst_i64',
+    'gen_gvec_ceq0',
+    'gen_gvec_cge0',
+    'gen_gvec_cgt0',
+    'gen_gvec_cle0',
+    'gen_gvec_clt0',
     'gen_gvec_sli',
     'gen_gvec_ssra',
     'gen_gvec_sri',
diff --git a/qemu/target/arm/translate-a64.c b/qemu/target/arm/translate-a64.c
index 34e9ca56..8056c0b4 100644
--- a/qemu/target/arm/translate-a64.c
+++ b/qemu/target/arm/translate-a64.c
@@ -741,16 +741,6 @@ static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
             vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
 }
 
-/* Expand a 2-operand AdvSIMD vector operation using an op descriptor. */
-static void gen_gvec_op2(DisasContext *s, bool is_q, int rd,
-                         int rn, const GVecGen2 *gvec_op)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-
-    tcg_gen_gvec_2(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
-                   is_q ? 16 : 8, vec_full_reg_size(s), gvec_op);
-}
-
 /* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */
 static void gen_gvec_op3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                          const GVecGen3 *gvec_op)
@@ -12615,13 +12605,21 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
         }
         break;
     case 0x8: /* CMGT, CMGE */
-        gen_gvec_op2(s, is_q, rd, rn, u ? &cge0_op[size] : &cgt0_op[size]);
+        if (u) {
+            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
+        } else {
+            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
+        }
         return;
     case 0x9: /* CMEQ, CMLE */
-        gen_gvec_op2(s, is_q, rd, rn, u ? &cle0_op[size] : &ceq0_op[size]);
+        if (u) {
+            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
+        } else {
+            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
+        }
         return;
     case 0xa: /* CMLT */
-        gen_gvec_op2(s, is_q, rd, rn, &clt0_op[size]);
+        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
         return;
     case 0xb:
         if (u) { /* ABS, NEG */
diff --git a/qemu/target/arm/translate.c b/qemu/target/arm/translate.c
index dda37fe8..fa8c374c 100644
--- a/qemu/target/arm/translate.c
+++ b/qemu/target/arm/translate.c
@@ -3775,204 +3775,59 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
     return 1;
 }
 
-static void gen_ceq0_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(s, TCG_COND_EQ, d, a, 0);
-    tcg_gen_neg_i32(s, d, d);
-}
+#define GEN_CMP0(NAME, COND)                                               \
+    static void gen_##NAME##0_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a)   \
+    {                                                                      \
+        tcg_gen_setcondi_i32(s, COND, d, a, 0);                            \
+        tcg_gen_neg_i32(s, d, d);                                          \
+    }                                                                      \
+    static void gen_##NAME##0_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a)   \
+    {                                                                      \
+        tcg_gen_setcondi_i64(s, COND, d, a, 0);                            \
+        tcg_gen_neg_i64(s, d, d);                                          \
+    }                                                                      \
+    static void gen_##NAME##0_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a) \
+    {                                                                      \
+        TCGv_vec zero = tcg_const_zeros_vec_matching(s, d);                \
+        tcg_gen_cmp_vec(s, COND, vece, d, a, zero);                        \
+        tcg_temp_free_vec(s, zero);                                        \
+    }                                                                      \
+    void gen_gvec_##NAME##0(TCGContext *s, unsigned vece, uint32_t d, uint32_t m, \
+                            uint32_t opr_sz, uint32_t max_sz)              \
+    {                                                                      \
+        const GVecGen2 op[4] = {                                           \
+            { .fno = gen_helper_gvec_##NAME##0_b,                          \
+              .fniv = gen_##NAME##0_vec,                                   \
+              .opt_opc = vecop_list_cmp,                                   \
+              .vece = MO_8 },                                              \
+            { .fno = gen_helper_gvec_##NAME##0_h,                          \
+              .fniv = gen_##NAME##0_vec,                                   \
+              .opt_opc = vecop_list_cmp,                                   \
+              .vece = MO_16 },                                             \
+            { .fni4 = gen_##NAME##0_i32,                                   \
+              .fniv = gen_##NAME##0_vec,                                   \
+              .opt_opc = vecop_list_cmp,                                   \
+              .vece = MO_32 },                                             \
+            { .fni8 = gen_##NAME##0_i64,                                   \
+              .fniv = gen_##NAME##0_vec,                                   \
+              .opt_opc = vecop_list_cmp,                                   \
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,                     \
+              .vece = MO_64 },                                             \
+        };                                                                 \
+        tcg_gen_gvec_2(s, d, m, opr_sz, max_sz, &op[vece]);                \
+    }
 
-static void gen_ceq0_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(s, TCG_COND_EQ, d, a, 0);
-    tcg_gen_neg_i64(s, d, d);
-}
-
-static void gen_ceq0_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(s, d);
-    tcg_gen_cmp_vec(s, TCG_COND_EQ, vece, d, a, zero);
-    tcg_temp_free_vec(s, zero);
-}
-
 static const TCGOpcode vecop_list_cmp[] = {
     INDEX_op_cmp_vec, 0
 };
 
-const GVecGen2 ceq0_op[4] = {
-    { .fno = gen_helper_gvec_ceq0_b,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_ceq0_h,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_ceq0_i32,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_ceq0_i64,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_cle0_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(s, TCG_COND_LE, d, a, 0);
-    tcg_gen_neg_i32(s, d, d);
-}
-
-static void gen_cle0_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(s, TCG_COND_LE, d, a, 0);
-    tcg_gen_neg_i64(s, d, d);
-}
-
-static void gen_cle0_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(s, d);
-    tcg_gen_cmp_vec(s, TCG_COND_LE, vece, d, a, zero);
-    tcg_temp_free_vec(s, zero);
-}
-
-const GVecGen2 cle0_op[4] = {
-    { .fno = gen_helper_gvec_cle0_b,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_cle0_h,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_cle0_i32,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_cle0_i64,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_cge0_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(s, TCG_COND_GE, d, a, 0);
-    tcg_gen_neg_i32(s, d, d);
-}
-
-static void gen_cge0_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(s, TCG_COND_GE, d, a, 0);
-    tcg_gen_neg_i64(s, d, d);
-}
-
-static void gen_cge0_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(s, d);
-    tcg_gen_cmp_vec(s, TCG_COND_GE, vece, d, a, zero);
-    tcg_temp_free_vec(s, zero);
-}
-
-const GVecGen2 cge0_op[4] = {
-    { .fno = gen_helper_gvec_cge0_b,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_cge0_h,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_cge0_i32,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_cge0_i64,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_clt0_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(s, TCG_COND_LT, d, a, 0);
-    tcg_gen_neg_i32(s, d, d);
-}
-
-static void gen_clt0_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(s, TCG_COND_LT, d, a, 0);
-    tcg_gen_neg_i64(s, d, d);
-}
-
-static void gen_clt0_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(s, d);
-    tcg_gen_cmp_vec(s, TCG_COND_LT, vece, d, a, zero);
-    tcg_temp_free_vec(s, zero);
-}
-
-const GVecGen2 clt0_op[4] = {
-    { .fno = gen_helper_gvec_clt0_b,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_clt0_h,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_clt0_i32,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_clt0_i64,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_cgt0_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(s, TCG_COND_GT, d, a, 0);
-    tcg_gen_neg_i32(s, d, d);
-}
-
-static void gen_cgt0_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(s, TCG_COND_GT, d, a, 0);
-    tcg_gen_neg_i64(s, d, d);
-}
-
-static void gen_cgt0_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(s, d);
-    tcg_gen_cmp_vec(s, TCG_COND_GT, vece, d, a, zero);
-    tcg_temp_free_vec(s, zero);
-}
-
-const GVecGen2 cgt0_op[4] = {
-    { .fno = gen_helper_gvec_cgt0_b,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_cgt0_h,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_cgt0_i32,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_cgt0_i64,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
+GEN_CMP0(ceq, TCG_COND_EQ)
+GEN_CMP0(cle, TCG_COND_LE)
+GEN_CMP0(cge, TCG_COND_GE)
+GEN_CMP0(clt, TCG_COND_LT)
+GEN_CMP0(cgt, TCG_COND_GT)
+
+#undef GEN_CMP0
 
 static void gen_ssra8_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
@@ -6900,24 +6755,19 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
 
                 break;
             case NEON_2RM_VCEQ0:
-                tcg_gen_gvec_2(tcg_ctx, rd_ofs, rm_ofs, vec_size,
-                               vec_size, &ceq0_op[size]);
+                gen_gvec_ceq0(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
             case NEON_2RM_VCGT0:
-                tcg_gen_gvec_2(tcg_ctx, rd_ofs, rm_ofs, vec_size,
-                               vec_size, &cgt0_op[size]);
+                gen_gvec_cgt0(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
            case NEON_2RM_VCLE0:
-                tcg_gen_gvec_2(tcg_ctx, rd_ofs, rm_ofs, vec_size,
-                               vec_size, &cle0_op[size]);
+                gen_gvec_cle0(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
            case NEON_2RM_VCGE0:
-                tcg_gen_gvec_2(tcg_ctx, rd_ofs, rm_ofs, vec_size,
-                               vec_size, &cge0_op[size]);
+                gen_gvec_cge0(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
            case NEON_2RM_VCLT0:
-                tcg_gen_gvec_2(tcg_ctx, rd_ofs, rm_ofs, vec_size,
-                               vec_size, &clt0_op[size]);
+                gen_gvec_clt0(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
 
             default:
diff --git a/qemu/target/arm/translate.h b/qemu/target/arm/translate.h
index 5a5146bb..22afcb58 100644
--- a/qemu/target/arm/translate.h
+++ b/qemu/target/arm/translate.h
@@ -282,11 +282,17 @@ static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
 uint64_t vfp_expand_imm(int size, uint8_t imm8);
 
 /* Vector operations shared between ARM and AArch64. */
-extern const GVecGen2 ceq0_op[4];
-extern const GVecGen2 clt0_op[4];
-extern const GVecGen2 cgt0_op[4];
-extern const GVecGen2 cle0_op[4];
-extern const GVecGen2 cge0_op[4];
+void gen_gvec_ceq0(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_clt0(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cgt0(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cle0(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cge0(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   uint32_t opr_sz, uint32_t max_sz);
+
 extern const GVecGen3 mla_op[4];
 extern const GVecGen3 mls_op[4];
 extern const GVecGen3 cmtst_op[4];
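
Note for readers of this backport: the interface change is easiest to see
at a call site. The sketch below is illustrative only and not part of the
patch; rd_ofs/rm_ofs stand in for register offsets that the translators
normally compute (e.g. via vec_full_reg_offset() on the a64 side), and 16
bytes corresponds to a quad vector of 32-bit elements:

    /* Before: callers fetched a GVecGen2 descriptor from a public
     * per-operation table indexed by element size, then expanded it. */
    tcg_gen_gvec_2(tcg_ctx, rd_ofs, rm_ofs, 16, 16, &ceq0_op[MO_32]);

    /* After: one function per operation; the descriptor table is
     * private to gen_gvec_ceq0() and selected internally by vece. */
    gen_gvec_ceq0(tcg_ctx, MO_32, rd_ofs, rm_ofs, 16, 16);

Both forms expand a "compare equal to zero" over the vector; translate.c
now calls the functional form directly, while translate-a64.c reaches it
through the existing gen_gvec_fn2() helper, which is what lets the unused
gen_gvec_op2() and the exported *_op[4] tables be deleted.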