diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index a7481311..a35a3d50 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -3596,6 +3596,10 @@
 #define helper_sve_smax_zpzz_d helper_sve_smax_zpzz_d_aarch64
 #define helper_sve_smax_zpzz_h helper_sve_smax_zpzz_h_aarch64
 #define helper_sve_smax_zpzz_s helper_sve_smax_zpzz_s_aarch64
+#define helper_sve_smaxi_b helper_sve_smaxi_b_aarch64
+#define helper_sve_smaxi_d helper_sve_smaxi_d_aarch64
+#define helper_sve_smaxi_h helper_sve_smaxi_h_aarch64
+#define helper_sve_smaxi_s helper_sve_smaxi_s_aarch64
 #define helper_sve_smaxv_b helper_sve_smaxv_b_aarch64
 #define helper_sve_smaxv_d helper_sve_smaxv_d_aarch64
 #define helper_sve_smaxv_h helper_sve_smaxv_h_aarch64
@@ -3604,6 +3608,10 @@
 #define helper_sve_smin_zpzz_d helper_sve_smin_zpzz_d_aarch64
 #define helper_sve_smin_zpzz_h helper_sve_smin_zpzz_h_aarch64
 #define helper_sve_smin_zpzz_s helper_sve_smin_zpzz_s_aarch64
+#define helper_sve_smini_b helper_sve_smini_b_aarch64
+#define helper_sve_smini_d helper_sve_smini_d_aarch64
+#define helper_sve_smini_h helper_sve_smini_h_aarch64
+#define helper_sve_smini_s helper_sve_smini_s_aarch64
 #define helper_sve_sminv_b helper_sve_sminv_b_aarch64
 #define helper_sve_sminv_d helper_sve_sminv_d_aarch64
 #define helper_sve_sminv_h helper_sve_sminv_h_aarch64
@@ -3621,6 +3629,10 @@
 #define helper_sve_sub_zpzz_d helper_sve_sub_zpzz_d_aarch64
 #define helper_sve_sub_zpzz_h helper_sve_sub_zpzz_h_aarch64
 #define helper_sve_sub_zpzz_s helper_sve_sub_zpzz_s_aarch64
+#define helper_sve_subri_b helper_sve_subri_b_aarch64
+#define helper_sve_subri_d helper_sve_subri_d_aarch64
+#define helper_sve_subri_h helper_sve_subri_h_aarch64
+#define helper_sve_subri_s helper_sve_subri_s_aarch64
 #define helper_sve_sunpk_d helper_sve_sunpk_d_aarch64
 #define helper_sve_sunpk_h helper_sve_sunpk_h_aarch64
 #define helper_sve_sunpk_s helper_sve_sunpk_s_aarch64
@@ -3653,6 +3665,10 @@
 #define helper_sve_umax_zpzz_d helper_sve_umax_zpzz_d_aarch64
 #define helper_sve_umax_zpzz_h helper_sve_umax_zpzz_h_aarch64
 #define helper_sve_umax_zpzz_s helper_sve_umax_zpzz_s_aarch64
+#define helper_sve_umaxi_b helper_sve_umaxi_b_aarch64
+#define helper_sve_umaxi_d helper_sve_umaxi_d_aarch64
+#define helper_sve_umaxi_h helper_sve_umaxi_h_aarch64
+#define helper_sve_umaxi_s helper_sve_umaxi_s_aarch64
 #define helper_sve_umaxv_b helper_sve_umaxv_b_aarch64
 #define helper_sve_umaxv_d helper_sve_umaxv_d_aarch64
 #define helper_sve_umaxv_h helper_sve_umaxv_h_aarch64
@@ -3661,6 +3677,10 @@
 #define helper_sve_umin_zpzz_d helper_sve_umin_zpzz_d_aarch64
 #define helper_sve_umin_zpzz_h helper_sve_umin_zpzz_h_aarch64
 #define helper_sve_umin_zpzz_s helper_sve_umin_zpzz_s_aarch64
+#define helper_sve_umini_b helper_sve_umini_b_aarch64
+#define helper_sve_umini_d helper_sve_umini_d_aarch64
+#define helper_sve_umini_h helper_sve_umini_h_aarch64
+#define helper_sve_umini_s helper_sve_umini_s_aarch64
 #define helper_sve_uminv_b helper_sve_uminv_b_aarch64
 #define helper_sve_uminv_d helper_sve_uminv_d_aarch64
 #define helper_sve_uminv_h helper_sve_uminv_h_aarch64
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index 6aceda46..6107a56a 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -3596,6 +3596,10 @@
 #define helper_sve_smax_zpzz_d helper_sve_smax_zpzz_d_aarch64eb
 #define helper_sve_smax_zpzz_h helper_sve_smax_zpzz_h_aarch64eb
 #define helper_sve_smax_zpzz_s helper_sve_smax_zpzz_s_aarch64eb
+#define helper_sve_smaxi_b helper_sve_smaxi_b_aarch64eb
+#define helper_sve_smaxi_d helper_sve_smaxi_d_aarch64eb
+#define helper_sve_smaxi_h helper_sve_smaxi_h_aarch64eb
+#define helper_sve_smaxi_s helper_sve_smaxi_s_aarch64eb
 #define helper_sve_smaxv_b helper_sve_smaxv_b_aarch64eb
 #define helper_sve_smaxv_d helper_sve_smaxv_d_aarch64eb
 #define helper_sve_smaxv_h helper_sve_smaxv_h_aarch64eb
@@ -3604,6 +3608,10 @@
 #define helper_sve_smin_zpzz_d helper_sve_smin_zpzz_d_aarch64eb
 #define helper_sve_smin_zpzz_h helper_sve_smin_zpzz_h_aarch64eb
 #define helper_sve_smin_zpzz_s helper_sve_smin_zpzz_s_aarch64eb
+#define helper_sve_smini_b helper_sve_smini_b_aarch64eb
+#define helper_sve_smini_d helper_sve_smini_d_aarch64eb
+#define helper_sve_smini_h helper_sve_smini_h_aarch64eb
+#define helper_sve_smini_s helper_sve_smini_s_aarch64eb
 #define helper_sve_sminv_b helper_sve_sminv_b_aarch64eb
 #define helper_sve_sminv_d helper_sve_sminv_d_aarch64eb
 #define helper_sve_sminv_h helper_sve_sminv_h_aarch64eb
@@ -3621,6 +3629,10 @@
 #define helper_sve_sub_zpzz_d helper_sve_sub_zpzz_d_aarch64eb
 #define helper_sve_sub_zpzz_h helper_sve_sub_zpzz_h_aarch64eb
 #define helper_sve_sub_zpzz_s helper_sve_sub_zpzz_s_aarch64eb
+#define helper_sve_subri_b helper_sve_subri_b_aarch64eb
+#define helper_sve_subri_d helper_sve_subri_d_aarch64eb
+#define helper_sve_subri_h helper_sve_subri_h_aarch64eb
+#define helper_sve_subri_s helper_sve_subri_s_aarch64eb
 #define helper_sve_sunpk_d helper_sve_sunpk_d_aarch64eb
 #define helper_sve_sunpk_h helper_sve_sunpk_h_aarch64eb
 #define helper_sve_sunpk_s helper_sve_sunpk_s_aarch64eb
@@ -3653,6 +3665,10 @@
 #define helper_sve_umax_zpzz_d helper_sve_umax_zpzz_d_aarch64eb
 #define helper_sve_umax_zpzz_h helper_sve_umax_zpzz_h_aarch64eb
 #define helper_sve_umax_zpzz_s helper_sve_umax_zpzz_s_aarch64eb
+#define helper_sve_umaxi_b helper_sve_umaxi_b_aarch64eb
+#define helper_sve_umaxi_d helper_sve_umaxi_d_aarch64eb
+#define helper_sve_umaxi_h helper_sve_umaxi_h_aarch64eb
+#define helper_sve_umaxi_s helper_sve_umaxi_s_aarch64eb
 #define helper_sve_umaxv_b helper_sve_umaxv_b_aarch64eb
 #define helper_sve_umaxv_d helper_sve_umaxv_d_aarch64eb
 #define helper_sve_umaxv_h helper_sve_umaxv_h_aarch64eb
@@ -3661,6 +3677,10 @@
 #define helper_sve_umin_zpzz_d helper_sve_umin_zpzz_d_aarch64eb
 #define helper_sve_umin_zpzz_h helper_sve_umin_zpzz_h_aarch64eb
 #define helper_sve_umin_zpzz_s helper_sve_umin_zpzz_s_aarch64eb
+#define helper_sve_umini_b helper_sve_umini_b_aarch64eb
+#define helper_sve_umini_d helper_sve_umini_d_aarch64eb
+#define helper_sve_umini_h helper_sve_umini_h_aarch64eb
+#define helper_sve_umini_s helper_sve_umini_s_aarch64eb
 #define helper_sve_uminv_b helper_sve_uminv_b_aarch64eb
 #define helper_sve_uminv_d helper_sve_uminv_d_aarch64eb
 #define helper_sve_uminv_h helper_sve_uminv_h_aarch64eb
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 7a9fdf37..0210ab54 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -3617,6 +3617,10 @@ aarch64_symbols = (
     'helper_sve_smax_zpzz_d',
     'helper_sve_smax_zpzz_h',
     'helper_sve_smax_zpzz_s',
+    'helper_sve_smaxi_b',
+    'helper_sve_smaxi_d',
+    'helper_sve_smaxi_h',
+    'helper_sve_smaxi_s',
     'helper_sve_smaxv_b',
     'helper_sve_smaxv_d',
     'helper_sve_smaxv_h',
@@ -3625,6 +3629,10 @@ aarch64_symbols = (
     'helper_sve_smin_zpzz_d',
     'helper_sve_smin_zpzz_h',
     'helper_sve_smin_zpzz_s',
+    'helper_sve_smini_b',
+    'helper_sve_smini_d',
+    'helper_sve_smini_h',
+    'helper_sve_smini_s',
     'helper_sve_sminv_b',
     'helper_sve_sminv_d',
     'helper_sve_sminv_h',
@@ -3642,6 +3650,10 @@ aarch64_symbols = (
     'helper_sve_sub_zpzz_d',
     'helper_sve_sub_zpzz_h',
     'helper_sve_sub_zpzz_s',
+    'helper_sve_subri_b',
+    'helper_sve_subri_d',
+    'helper_sve_subri_h',
+    'helper_sve_subri_s',
     'helper_sve_sunpk_d',
     'helper_sve_sunpk_h',
     'helper_sve_sunpk_s',
@@ -3674,6 +3686,10 @@ aarch64_symbols = (
     'helper_sve_umax_zpzz_d',
     'helper_sve_umax_zpzz_h',
     'helper_sve_umax_zpzz_s',
+    'helper_sve_umaxi_b',
+    'helper_sve_umaxi_d',
+    'helper_sve_umaxi_h',
+    'helper_sve_umaxi_s',
     'helper_sve_umaxv_b',
     'helper_sve_umaxv_d',
     'helper_sve_umaxv_h',
@@ -3682,6 +3698,10 @@ aarch64_symbols = (
     'helper_sve_umin_zpzz_d',
     'helper_sve_umin_zpzz_h',
     'helper_sve_umin_zpzz_s',
+    'helper_sve_umini_b',
+    'helper_sve_umini_d',
+    'helper_sve_umini_h',
+    'helper_sve_umini_s',
     'helper_sve_uminv_b',
     'helper_sve_uminv_d',
     'helper_sve_uminv_h',
diff --git a/qemu/target/arm/helper-sve.h b/qemu/target/arm/helper-sve.h
index 1863106d..97bfe0f4 100644
--- a/qemu/target/arm/helper-sve.h
+++ b/qemu/target/arm/helper-sve.h
@@ -680,3 +680,28 @@ DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_3(sve_while, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_smaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_smini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_umaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_umini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
diff --git a/qemu/target/arm/sve.decode b/qemu/target/arm/sve.decode
index 62f2fb47..18c40294 100644
--- a/qemu/target/arm/sve.decode
+++ b/qemu/target/arm/sve.decode
@@ -42,6 +42,8 @@
 
 # Signed 8-bit immediate, optionally shifted left by 8.
 %sh8_i8s        5:9 !function=expand_imm_sh8s
+# Unsigned 8-bit immediate, optionally shifted left by 8.
+%sh8_i8u        5:9 !function=expand_imm_sh8u
 
 # Either a copy of rd (at bit 0), or a different source
 # as propagated via the MOVPRFX instruction.
@@ -95,6 +97,12 @@
 @pd_pn_pm       ........ esz:2 .. rm:4 ....... rn:4 . rd:4      &rrr_esz
 @rdn_rm         ........ esz:2 ...... ...... rm:5 rd:5 \
                 &rrr_esz rn=%reg_movprfx
+@rdn_sh_i8u     ........ esz:2 ...... ...... ..... rd:5 \
+                &rri_esz rn=%reg_movprfx imm=%sh8_i8u
+@rdn_i8u        ........ esz:2 ...... ... imm:8 rd:5 \
+                &rri_esz rn=%reg_movprfx
+@rdn_i8s        ........ esz:2 ...... ... imm:s8 rd:5 \
+                &rri_esz rn=%reg_movprfx
 
 # Three operand with "memory" size, aka immediate left shift
 @rd_rn_msz_rm   ........ ... rm:5 .... imm:2 rn:5 rd:5          &rrri
@@ -622,6 +630,24 @@ FDUP            00100101 esz:2 111 00 1110 imm:8 rd:5
 
 # SVE broadcast integer immediate (unpredicated)
 DUP_i           00100101 esz:2 111 00 011 . ........ rd:5       imm=%sh8_i8s
 
+# SVE integer add/subtract immediate (unpredicated)
+ADD_zzi         00100101 .. 100 000 11 . ........ .....         @rdn_sh_i8u
+SUB_zzi         00100101 .. 100 001 11 . ........ .....         @rdn_sh_i8u
+SUBR_zzi        00100101 .. 100 011 11 . ........ .....         @rdn_sh_i8u
+SQADD_zzi       00100101 .. 100 100 11 . ........ .....         @rdn_sh_i8u
+UQADD_zzi       00100101 .. 100 101 11 . ........ .....         @rdn_sh_i8u
+SQSUB_zzi       00100101 .. 100 110 11 . ........ .....         @rdn_sh_i8u
+UQSUB_zzi       00100101 .. 100 111 11 . ........ .....         @rdn_sh_i8u
+
+# SVE integer min/max immediate (unpredicated)
+SMAX_zzi        00100101 .. 101 000 110 ........ .....          @rdn_i8s
+UMAX_zzi        00100101 .. 101 001 110 ........ .....          @rdn_i8u
+SMIN_zzi        00100101 .. 101 010 110 ........ .....          @rdn_i8s
+UMIN_zzi        00100101 .. 101 011 110 ........ .....          @rdn_i8u
+
+# SVE integer multiply immediate (unpredicated)
+MUL_zzi         00100101 .. 110 000 110 ........ .....          @rdn_i8s
+
 ### SVE Memory - 32-bit Gather and Unsized Contiguous Group
 
 # SVE load predicate register
diff --git a/qemu/target/arm/sve_helper.c b/qemu/target/arm/sve_helper.c
index 0a921875..fb89394d 100644
--- a/qemu/target/arm/sve_helper.c
+++ b/qemu/target/arm/sve_helper.c
@@ -803,6 +803,46 @@ DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN)
 #undef DO_VPZ
 #undef DO_VPZ_D
 
+/* Two vector operand, one scalar operand, unpredicated. */
+#define DO_ZZI(NAME, TYPE, OP)                                      \
+void HELPER(NAME)(void *vd, void *vn, uint64_t s64, uint32_t desc)  \
+{                                                                   \
+    intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE);           \
+    TYPE s = s64, *d = vd, *n = vn;                                 \
+    for (i = 0; i < opr_sz; ++i) {                                  \
+        d[i] = OP(n[i], s);                                         \
+    }                                                               \
+}
+
+#define DO_SUBR(X, Y) (Y - X)
+
+DO_ZZI(sve_subri_b, uint8_t, DO_SUBR)
+DO_ZZI(sve_subri_h, uint16_t, DO_SUBR)
+DO_ZZI(sve_subri_s, uint32_t, DO_SUBR)
+DO_ZZI(sve_subri_d, uint64_t, DO_SUBR)
+
+DO_ZZI(sve_smaxi_b, int8_t, DO_MAX)
+DO_ZZI(sve_smaxi_h, int16_t, DO_MAX)
+DO_ZZI(sve_smaxi_s, int32_t, DO_MAX)
+DO_ZZI(sve_smaxi_d, int64_t, DO_MAX)
+
+DO_ZZI(sve_smini_b, int8_t, DO_MIN)
+DO_ZZI(sve_smini_h, int16_t, DO_MIN)
+DO_ZZI(sve_smini_s, int32_t, DO_MIN)
+DO_ZZI(sve_smini_d, int64_t, DO_MIN)
+
+DO_ZZI(sve_umaxi_b, uint8_t, DO_MAX)
+DO_ZZI(sve_umaxi_h, uint16_t, DO_MAX)
+DO_ZZI(sve_umaxi_s, uint32_t, DO_MAX)
+DO_ZZI(sve_umaxi_d, uint64_t, DO_MAX)
+
+DO_ZZI(sve_umini_b, uint8_t, DO_MIN)
+DO_ZZI(sve_umini_h, uint16_t, DO_MIN)
+DO_ZZI(sve_umini_s, uint32_t, DO_MIN)
+DO_ZZI(sve_umini_d, uint64_t, DO_MIN)
+
+#undef DO_ZZI
+
 #undef DO_AND
 #undef DO_ORR
 #undef DO_EOR
@@ -817,6 +857,7 @@ DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN)
 #undef DO_ASR
 #undef DO_LSR
 #undef DO_LSL
+#undef DO_SUBR
 
 /* Similar to the ARM LastActiveElement pseudocode function, except the
    result is multiplied by the element size. This includes the not found
diff --git a/qemu/target/arm/translate-sve.c b/qemu/target/arm/translate-sve.c
index 7a4c9178..794e1b47 100644
--- a/qemu/target/arm/translate-sve.c
+++ b/qemu/target/arm/translate-sve.c
@@ -73,6 +73,11 @@ static inline int expand_imm_sh8s(int x)
     return (int8_t)x << (x & 0x100 ? 8 : 0);
 }
 
+static inline int expand_imm_sh8u(int x)
+{
+    return (uint8_t)x << (x & 0x100 ? 8 : 0);
+}
+
 /*
  * Include the generated decoder.
  */
@@ -3345,6 +3350,169 @@ static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a, uint32_t insn)
     return true;
 }
 
+static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    if (a->esz == 0 && extract32(insn, 13, 1)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_addi(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd),
+                          vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
+    }
+    return true;
+}
+
+static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    a->imm = -a->imm;
+    return trans_ADD_zzi(s, a, insn);
+}
+
+static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    static const GVecGen2s op[4] = {
+        {
+            tcg_gen_vec_sub8_i64,
+            NULL,
+            tcg_gen_sub_vec,
+            gen_helper_sve_subri_b,
+            INDEX_op_sub_vec,
+            0,
+            MO_8,
+            false,
+            true
+        },
+        {
+            tcg_gen_vec_sub16_i64,
+            NULL,
+            tcg_gen_sub_vec,
+            gen_helper_sve_subri_h,
+            INDEX_op_sub_vec,
+            0,
+            MO_16,
+            false,
+            true
+        },
+        {
+            NULL,
+            tcg_gen_sub_i32,
+            tcg_gen_sub_vec,
+            gen_helper_sve_subri_s,
+            INDEX_op_sub_vec,
+            0,
+            MO_32,
+            false,
+            true
+        },
+        {
+            tcg_gen_sub_i64,
+            NULL,
+            tcg_gen_sub_vec,
+            gen_helper_sve_subri_d,
+            INDEX_op_sub_vec,
+            0,
+            MO_64,
+            TCG_TARGET_REG_BITS == 64,
+            true
+        }
+    };
+
+    if (a->esz == 0 && extract32(insn, 13, 1)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        TCGv_i64 c = tcg_const_i64(tcg_ctx, a->imm);
+        tcg_gen_gvec_2s(tcg_ctx, vec_full_reg_offset(s, a->rd),
+                        vec_full_reg_offset(s, a->rn),
+                        vsz, vsz, c, &op[a->esz]);
+        tcg_temp_free_i64(tcg_ctx, c);
+    }
+    return true;
+}
+
+static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_muli(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd),
+                          vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
+    }
+    return true;
+}
+
+static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, uint32_t insn,
+                       bool u, bool d)
+{
+    if (a->esz == 0 && extract32(insn, 13, 1)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        TCGv_i64 val = tcg_const_i64(tcg_ctx, a->imm);
+        do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, u, d);
+        tcg_temp_free_i64(tcg_ctx, val);
+    }
+    return true;
+}
+
+static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    return do_zzi_sat(s, a, insn, false, false);
+}
+
+static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    return do_zzi_sat(s, a, insn, true, false);
+}
+
+static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    return do_zzi_sat(s, a, insn, false, true);
+}
+
+static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a, uint32_t insn)
+{
+    return do_zzi_sat(s, a, insn, true, true);
+}
+
+static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
+{
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        TCGv_i64 c = tcg_const_i64(tcg_ctx, a->imm);
+
+        tcg_gen_gvec_2i_ool(tcg_ctx, vec_full_reg_offset(s, a->rd),
+                            vec_full_reg_offset(s, a->rn),
+                            c, vsz, vsz, 0, fn);
+        tcg_temp_free_i64(tcg_ctx, c);
+    }
+    return true;
+}
+
+#define DO_ZZI(NAME, name)                                          \
+static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a,     \
+                               uint32_t insn)                       \
+{                                                                   \
+    static gen_helper_gvec_2i * const fns[4] = {                    \
+        gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h,    \
+        gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d,    \
+    };                                                              \
+    return do_zzi_ool(s, a, fns[a->esz]);                           \
+}
+
+DO_ZZI(SMAX, smax)
+DO_ZZI(UMAX, umax)
+DO_ZZI(SMIN, smin)
+DO_ZZI(UMIN, umin)
+
+#undef DO_ZZI
+
 /*
  *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
 */
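
Note on the immediate decode: the two expanders in translate-sve.c differ only in whether the low 8 bits of the 9-bit field are sign- or zero-extended before the optional shift; bit 8 selects "shift the 8-bit immediate left by 8". The self-contained sketch below exercises both with sample encodings — the functions are copied verbatim from the patch, while the driver and its test values are purely illustrative:

    #include <assert.h>
    #include <stdint.h>

    /* As in translate-sve.c: bit 8 of the 9-bit field selects
     * a left shift by 8 of the 8-bit immediate. */
    static inline int expand_imm_sh8s(int x)
    {
        return (int8_t)x << (x & 0x100 ? 8 : 0);
    }

    static inline int expand_imm_sh8u(int x)
    {
        return (uint8_t)x << (x & 0x100 ? 8 : 0);
    }

    int main(void)
    {
        assert(expand_imm_sh8u(0x0ff) == 0xff);    /* 255, unshifted */
        assert(expand_imm_sh8u(0x1ff) == 0xff00);  /* 255 << 8 */
        assert(expand_imm_sh8s(0x0ff) == -1);      /* low byte sign-extended */
        assert(expand_imm_sh8s(0x17f) == 0x7f00);  /* 127 << 8 */
        return 0;
    }

This is also why ADD_zzi, SUBR_zzi, and the saturating forms reject esz == 0 when bit 13 (the shift bit) is set: a byte element cannot hold a shifted immediate.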
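For readers unfamiliar with the helper-generation macros, here is roughly what one DO_ZZI instantiation from sve_helper.c expands to. It is a minimal standalone approximation: the real helper's name is mangled through HELPER(), the vector length comes from simd_oprsz(desc), and DO_MAX is assumed to be QEMU's usual ((N) >= (M) ? (N) : (M)) definition:

    #include <stddef.h>
    #include <stdint.h>

    /* Approximate expansion of DO_ZZI(sve_smaxi_b, int8_t, DO_MAX):
     * take the signed max of every byte element and the immediate.
     * In the real helper, opr_sz is decoded from the desc argument. */
    static void sve_smaxi_b_sketch(void *vd, void *vn, uint64_t s64,
                                   size_t opr_sz)
    {
        int8_t s = (int8_t)s64, *d = vd, *n = vn;
        for (size_t i = 0; i < opr_sz / sizeof(int8_t); ++i) {
            d[i] = n[i] >= s ? n[i] : s;    /* DO_MAX(n[i], s) */
        }
    }

The same skeleton serves all twenty helpers; only the element TYPE and the OP macro vary, which is why the immediate is passed as a plain uint64_t and truncated to the element type inside the helper.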