diff --git a/qemu/target/arm/translate-vfp.inc.c b/qemu/target/arm/translate-vfp.inc.c
index 09c6a085..60e3a9e7 100644
--- a/qemu/target/arm/translate-vfp.inc.c
+++ b/qemu/target/arm/translate-vfp.inc.c
@@ -1156,6 +1156,42 @@ typedef void VFPGen3OpDPFn(TCGContext *, TCGv_i64 vd,
 typedef void VFPGen2OpSPFn(TCGContext *, TCGv_i32 vd, TCGv_i32 vm);
 typedef void VFPGen2OpDPFn(TCGContext *, TCGv_i64 vd, TCGv_i64 vm);
 
+/*
+ * Return true if the specified S reg is in a scalar bank
+ * (ie if it is s0..s7)
+ */
+static inline bool vfp_sreg_is_scalar(int reg)
+{
+    return (reg & 0x18) == 0;
+}
+
+/*
+ * Return true if the specified D reg is in a scalar bank
+ * (ie if it is d0..d3 or d16..d19)
+ */
+static inline bool vfp_dreg_is_scalar(int reg)
+{
+    return (reg & 0xc) == 0;
+}
+
+/*
+ * Advance the S reg number forwards by delta within its bank
+ * (ie increment the low 3 bits but leave the rest the same)
+ */
+static inline int vfp_advance_sreg(int reg, int delta)
+{
+    return ((reg + delta) & 0x7) | (reg & ~0x7);
+}
+
+/*
+ * Advance the D reg number forwards by delta within its bank
+ * (ie increment the low 2 bits but leave the rest the same)
+ */
+static inline int vfp_advance_dreg(int reg, int delta)
+{
+    return ((reg + delta) & 0x3) | (reg & ~0x3);
+}
+
 /*
  * Perform a 3-operand VFP data processing instruction. fn is the
  * callback to do the actual operation; this function deals with the
@@ -1167,7 +1203,6 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 f0, f1, fd;
     TCGv_ptr fpst;
@@ -1182,16 +1217,14 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = s->vec_stride + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_sreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1222,11 +1255,11 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
+        vn = vfp_advance_sreg(vn, delta_d);
         neon_load_reg32(s, f0, vn);
         if (delta_m) {
-            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            vm = vfp_advance_sreg(vm, delta_m);
             neon_load_reg32(s, f1, vm);
         }
     }
@@ -1245,7 +1278,6 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 f0, f1, fd;
     TCGv_ptr fpst;
@@ -1265,16 +1297,14 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = (s->vec_stride >> 1) + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_dreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1304,11 +1334,11 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
         }
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
+        vn = vfp_advance_dreg(vn, delta_d);
         neon_load_reg64(s, f0, vn);
         if (delta_m) {
-            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            vm = vfp_advance_dreg(vm, delta_m);
             neon_load_reg64(s, f1, vm);
         }
     }
@@ -1326,7 +1356,6 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 f0, fd;
 
@@ -1340,16 +1369,14 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = s->vec_stride + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_sreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1375,7 +1402,7 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_sreg(vd, delta_d);
                 neon_store_reg32(s, fd, vd);
             }
             break;
         }
@@ -1383,8 +1410,8 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
+        vm = vfp_advance_sreg(vm, delta_m);
         neon_load_reg32(s, f0, vm);
     }
 
@@ -1399,7 +1426,6 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 f0, fd;
 
@@ -1418,16 +1444,14 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = (s->vec_stride >> 1) + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_dreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1453,7 +1477,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_dreg(vd, delta_d);
                 neon_store_reg64(s, fd, vd);
             }
             break;
         }
@@ -1461,8 +1485,8 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
+        vm = vfp_advance_dreg(vm, delta_m);
         neon_load_reg64(s, f0, vm);
     }
 
@@ -1807,7 +1831,6 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 fd;
     uint32_t n, i, vd;
@@ -1828,9 +1851,8 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -1859,7 +1881,7 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
     }
 
     tcg_temp_free_i32(tcg_ctx, fd);
@@ -1870,7 +1892,6 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 fd;
     uint32_t n, i, vd;
@@ -1896,9 +1917,8 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     }
 
    if (veclen > 0) {
-        bank_mask = 0xc;
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -1927,7 +1947,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
     }
 
     tcg_temp_free_i64(tcg_ctx, fd);
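
Note (not part of the patch itself): below is a minimal standalone sketch of the bank arithmetic the new helpers encode, assuming, as the helper bodies above imply, that an S register's bank is reg & ~0x7 (banks of eight) and a D register's bank is reg & ~0x3 (banks of four), so advancing a register wraps around within its own bank. The helper bodies are copied from the first hunk; the main() driver and the example register numbers are illustrative only.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Helpers as added by the patch above. */
static inline bool vfp_sreg_is_scalar(int reg) { return (reg & 0x18) == 0; }
static inline bool vfp_dreg_is_scalar(int reg) { return (reg & 0xc) == 0; }
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}

int main(void)
{
    /* s0..s7 form the scalar bank; s8..s15, s16..s23, s24..s31 are vector banks. */
    assert(vfp_sreg_is_scalar(5) && !vfp_sreg_is_scalar(14));

    /* Advancing s14 by 3 wraps within its bank (s8..s15): s14 -> s9, not s17. */
    assert(vfp_advance_sreg(14, 3) == 9);

    /* d0..d3 and d16..d19 are scalar banks; advancing d6 by 2 wraps d6 -> d4. */
    assert(vfp_dreg_is_scalar(17) && !vfp_dreg_is_scalar(6));
    assert(vfp_advance_dreg(6, 2) == 4);

    printf("bank arithmetic behaves as described\n");
    return 0;
}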