diff --git a/src/xenia/base/cvar.h b/src/xenia/base/cvar.h
index 144703665..e1e83f5d4 100644
--- a/src/xenia/base/cvar.h
+++ b/src/xenia/base/cvar.h
@@ -170,8 +170,10 @@ CommandVar::CommandVar(const char* name, T* default_value,
                         const char* description)
     : name_(name),
       default_value_(*default_value),
-      description_(description),
-      current_value_(default_value) {}
+      current_value_(default_value),
+      commandline_value_(),
+      description_(description)
+      {}
 
 template
 ConfigVar::ConfigVar(const char* name, T* default_value,
diff --git a/src/xenia/base/memory.cc b/src/xenia/base/memory.cc
index 26f34318e..b83e545d2 100644
--- a/src/xenia/base/memory.cc
+++ b/src/xenia/base/memory.cc
@@ -59,7 +59,7 @@ static void XeCopy16384StreamingAVX(CacheLine* XE_RESTRICT to,
   CacheLine* dest4 = to + (NUM_CACHELINES_IN_PAGE * 3);
   CacheLine* src4 = from + (NUM_CACHELINES_IN_PAGE * 3);
 
-#pragma loop(no_vector)
+
   for (uint32_t i = 0; i < num_lines_for_8k; ++i) {
     xe::swcache::CacheLine line0, line1, line2, line3;
 
@@ -92,7 +92,6 @@ static void XeCopy16384Movdir64M(CacheLine* XE_RESTRICT to,
   CacheLine* dest4 = to + (NUM_CACHELINES_IN_PAGE * 3);
   CacheLine* src4 = from + (NUM_CACHELINES_IN_PAGE * 3);
 
-#pragma loop(no_vector)
   for (uint32_t i = 0; i < num_lines_for_8k; ++i) {
     _movdir64b(dest1 + i, src1 + i);
     _movdir64b(dest2 + i, src2 + i);
diff --git a/src/xenia/base/memory.h b/src/xenia/base/memory.h
index bd7081418..178d88fb7 100644
--- a/src/xenia/base/memory.h
+++ b/src/xenia/base/memory.h
@@ -620,23 +620,23 @@ static void Prefetch(const void* addr) {
 }
 template <>
-void Prefetch(const void* addr) {
+XE_MAYBE_UNUSED void Prefetch(const void* addr) {
   PrefetchW(addr);
 }
 template <>
-void Prefetch(const void* addr) {
+XE_MAYBE_UNUSED void Prefetch(const void* addr) {
   PrefetchNTA(addr);
 }
 template <>
-void Prefetch(const void* addr) {
+XE_MAYBE_UNUSED void Prefetch(const void* addr) {
   PrefetchL3(addr);
 }
 template <>
-void Prefetch(const void* addr) {
+XE_MAYBE_UNUSED void Prefetch(const void* addr) {
   PrefetchL2(addr);
 }
 template <>
-void Prefetch(const void* addr) {
+XE_MAYBE_UNUSED void Prefetch(const void* addr) {
   PrefetchL1(addr);
 }
 // todo: does aarch64 have streaming stores/loads?
diff --git a/src/xenia/base/mutex.h b/src/xenia/base/mutex.h
index b7fc09896..5f1bc8a60 100644
--- a/src/xenia/base/mutex.h
+++ b/src/xenia/base/mutex.h
@@ -25,6 +25,7 @@ namespace xe {
 */
 class alignas(4096) xe_global_mutex {
+  XE_MAYBE_UNUSED
   char detail[64];
 
  public:
@@ -38,6 +39,7 @@ class alignas(4096) xe_global_mutex {
 using global_mutex_type = xe_global_mutex;
 
 class alignas(64) xe_fast_mutex {
+  XE_MAYBE_UNUSED
   char detail[64];
 
  public:
@@ -62,8 +64,6 @@ class xe_unlikely_mutex {
   ~xe_unlikely_mutex() { mut = 0; }
 
   void lock() {
-    uint32_t lock_expected = 0;
-
     if (XE_LIKELY(_tryget())) {
       return;
     } else {
diff --git a/src/xenia/base/platform.h b/src/xenia/base/platform.h
index 61749e4c7..c258ad08f 100644
--- a/src/xenia/base/platform.h
+++ b/src/xenia/base/platform.h
@@ -144,9 +144,11 @@
 #define XE_MSVC_OPTIMIZE_SMALL()
 #define XE_MSVC_OPTIMIZE_REVERT()
 #endif
+
 #if XE_COMPILER_HAS_GNU_EXTENSIONS == 1
 #define XE_LIKELY_IF(...) if (XE_LIKELY(__VA_ARGS__))
 #define XE_UNLIKELY_IF(...) if (XE_UNLIKELY(__VA_ARGS__))
+#define XE_MAYBE_UNUSED __attribute__((unused))
 #else
 #if __cplusplus >= 202002
 #define XE_LIKELY_IF(...) if (!!(__VA_ARGS__)) [[likely]]
@@ -155,6 +157,7 @@
 #define XE_LIKELY_IF(...) if (!!(__VA_ARGS__))
 #define XE_UNLIKELY_IF(...) if (!!(__VA_ARGS__))
 #endif
+#define XE_MAYBE_UNUSED
 #endif
 // only use __restrict if MSVC, for clang/gcc we can use -fstrict-aliasing which
 // acts as __restrict across the board todo: __restrict is part of the type
diff --git a/src/xenia/base/ring_buffer.cc b/src/xenia/base/ring_buffer.cc
index 53cd4d703..4f40b0670 100644
--- a/src/xenia/base/ring_buffer.cc
+++ b/src/xenia/base/ring_buffer.cc
@@ -78,7 +78,9 @@ size_t RingBuffer::Read(uint8_t* buffer, size_t _count) {
   if (read_offset_ < write_offset_) {
     assert_true(read_offset_ + count <= write_offset_);
   } else if (read_offset_ + count >= capacity_) {
+    XE_MAYBE_UNUSED
     ring_size_t left_half = capacity_ - read_offset_;
+
     assert_true(count - left_half <= write_offset_);
   }
 
@@ -107,6 +109,7 @@ size_t RingBuffer::Write(const uint8_t* buffer, size_t _count) {
   if (write_offset_ < read_offset_) {
     assert_true(write_offset_ + count <= read_offset_);
   } else if (write_offset_ + count >= capacity_) {
+    XE_MAYBE_UNUSED
     size_t left_half = capacity_ - write_offset_;
     assert_true(count - left_half <= read_offset_);
   }
diff --git a/src/xenia/base/ring_buffer.h b/src/xenia/base/ring_buffer.h
index e481f4f27..e914e226f 100644
--- a/src/xenia/base/ring_buffer.h
+++ b/src/xenia/base/ring_buffer.h
@@ -68,7 +68,6 @@ class RingBuffer {
     ring_size_t offset_delta = write_offs - read_offs;
     ring_size_t wrap_read_count = (cap - read_offs) + write_offs;
 
-    ring_size_t comparison_value = read_offs <= write_offs;
     if (XE_LIKELY(read_offs <= write_offs)) {
       return offset_delta;  // will be 0 if they are equal, semantically
diff --git a/src/xenia/base/threading_win.cc b/src/xenia/base/threading_win.cc
index ed7874458..a8aa7889c 100644
--- a/src/xenia/base/threading_win.cc
+++ b/src/xenia/base/threading_win.cc
@@ -117,7 +117,7 @@ void set_name(const std::string_view name) {
 
 // checked ntoskrnl, it does not modify delay, so we can place this as a
 // constant and avoid creating a stack variable
-static const LARGE_INTEGER sleepdelay0_for_maybeyield{0LL};
+static const LARGE_INTEGER sleepdelay0_for_maybeyield{{0LL}};
 void MaybeYield() {
 #if 0
@@ -314,7 +314,8 @@ class Win32Event : public Win32Handle {
   }
 #endif
 
-  EventInfo Query() { EventInfo result{};
+  EventInfo Query() override {
+    EventInfo result{};
     NtQueryEventPointer.invoke(handle_, 0, &result, sizeof(EventInfo), nullptr);
     return result;
   }
@@ -429,7 +430,7 @@ class Win32Timer : public Win32Handle {
   }
   bool SetRepeatingAt(GClock_::time_point due_time,
                       std::chrono::milliseconds period,
-                      std::function opt_callback = nullptr) {
+                      std::function opt_callback = nullptr) override {
     return SetRepeatingAt(date::clock_cast(due_time), period,
                           std::move(opt_callback));
   }
diff --git a/src/xenia/cpu/backend/x64/x64_emitter.cc b/src/xenia/cpu/backend/x64/x64_emitter.cc
index 3ba47cad4..bc9224ab6 100644
--- a/src/xenia/cpu/backend/x64/x64_emitter.cc
+++ b/src/xenia/cpu/backend/x64/x64_emitter.cc
@@ -46,10 +46,6 @@ DEFINE_bool(ignore_undefined_externs, true,
 DEFINE_bool(emit_source_annotations, false,
             "Add extra movs and nops to make disassembly easier to read.",
             "CPU");
-DEFINE_bool(resolve_rel32_guest_calls, true,
-            "Experimental optimization, directly call already resolved "
-            "functions via x86 rel32 call/jmp",
-            "CPU");
 
 DEFINE_bool(enable_incorrect_roundingmode_behavior, false,
             "Disables the FPU/VMX MXCSR sharing workaround, potentially "
@@ -78,7 +74,6 @@ using namespace xe::literals;
 
 static const size_t kMaxCodeSize = 1_MiB;
 
-static const size_t kStashOffset = 32;
 // static const size_t kStashOffsetHigh = 32 + 32;
 
 const uint32_t
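// Illustrative sketch, not part of the patch: how the XE_MAYBE_UNUSED macro
// introduced in platform.h above is meant to be used. On compilers with GNU
// extensions it expands to __attribute__((unused)); elsewhere it expands to
// nothing. The variable below mirrors the assert-only locals annotated in
// ring_buffer.cc; all names and values here are made up for the example.
#include <cassert>

#if defined(__GNUC__) || defined(__clang__)
#define XE_MAYBE_UNUSED __attribute__((unused))
#else
#define XE_MAYBE_UNUSED
#endif

void CheckWrap(unsigned capacity, unsigned read_offset, unsigned count,
               unsigned write_offset) {
  // Only read inside assert(); when asserts are compiled out, the annotation
  // keeps unused-variable warnings quiet.
  XE_MAYBE_UNUSED
  unsigned left_half = capacity - read_offset;
  assert(count - left_half <= write_offset);
}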
X64Emitter::gpr_reg_map_[X64Emitter::GPR_COUNT] = { @@ -141,55 +136,6 @@ bool X64Emitter::Emit(GuestFunction* function, HIRBuilder* builder, return true; } -#pragma pack(push, 1) -struct RGCEmitted { - uint8_t ff_; - uint32_t rgcid_; -}; -#pragma pack(pop) - -#if 0 -void X64Emitter::InjectCallAddresses(void* new_execute_address) { - for (auto&& callsite : call_sites_) { - RGCEmitted* hunter = (RGCEmitted*)new_execute_address; - while (hunter->ff_ != 0xFF || hunter->rgcid_ != callsite.offset_) { - hunter = - reinterpret_cast(reinterpret_cast(hunter) + 1); - } - - hunter->ff_ = callsite.is_jump_ ? 0xE9 : 0xE8; - hunter->rgcid_ = - static_cast(static_cast(callsite.destination_) - - reinterpret_cast(hunter + 1)); - } -} - -#else -void X64Emitter::InjectCallAddresses(void* new_execute_address) { -#if 0 - RGCEmitted* hunter = (RGCEmitted*)new_execute_address; - - std::map id_to_rgc{}; - - for (auto&& callsite : call_sites_) { - id_to_rgc[callsite.offset_] = &callsite; - } -#else - RGCEmitted* hunter = (RGCEmitted*)new_execute_address; - for (auto&& callsite : call_sites_) { - while (hunter->ff_ != 0xFF || hunter->rgcid_ != callsite.offset_) { - hunter = - reinterpret_cast(reinterpret_cast(hunter) + 1); - } - - hunter->ff_ = callsite.is_jump_ ? 0xE9 : 0xE8; - hunter->rgcid_ = - static_cast(static_cast(callsite.destination_) - - reinterpret_cast(hunter + 1)); - } -#endif -} -#endif void* X64Emitter::Emplace(const EmitFunctionInfo& func_info, GuestFunction* function) { // To avoid changing xbyak, we do a switcharoo here. @@ -207,10 +153,6 @@ void* X64Emitter::Emplace(const EmitFunctionInfo& func_info, if (function) { code_cache_->PlaceGuestCode(function->address(), top_, func_info, function, new_execute_address, new_write_address); - - if (cvars::resolve_rel32_guest_calls) { - InjectCallAddresses(new_execute_address); - } } else { code_cache_->PlaceHostCode(0, top_, func_info, new_execute_address, new_write_address); @@ -219,7 +161,6 @@ void* X64Emitter::Emplace(const EmitFunctionInfo& func_info, ready(); top_ = old_address; reset(); - call_sites_.clear(); tail_code_.clear(); for (auto&& cached_label : label_cache_) { delete cached_label; @@ -336,7 +277,7 @@ bool X64Emitter::Emit(HIRBuilder* builder, EmitFunctionInfo& func_info) { // Mark block labels. auto label = block->label_head; while (label) { - L(label->name); + L(std::to_string(label->id)); label = label->next; } @@ -418,7 +359,6 @@ void X64Emitter::EmitProfilerEpilogue() { // actually... lets just try without atomics lol // lock(); add(qword[r10], rdx); - } #endif } @@ -534,44 +474,23 @@ void X64Emitter::Call(const hir::Instr* instr, GuestFunction* function) { auto fn = static_cast(function); // Resolve address to the function to call and store in rax. - if (cvars::resolve_rel32_guest_calls && fn->machine_code()) { - ResolvableGuestCall rgc; - rgc.destination_ = uint32_t(uint64_t(fn->machine_code())); - rgc.offset_ = current_rgc_id_; - current_rgc_id_++; - + if (fn->machine_code()) { if (!(instr->flags & hir::CALL_TAIL)) { mov(rcx, qword[rsp + StackLayout::GUEST_CALL_RET_ADDR]); - db(0xFF); - rgc.is_jump_ = false; - - dd(rgc.offset_); + call((void*)fn->machine_code()); } else { // tail call EmitTraceUserCallReturn(); - - rgc.is_jump_ = true; + EmitProfilerEpilogue(); // Pass the callers return address over. 
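// Illustrative sketch, not part of the patch: the emitter now binds xbyak
// labels by numeric id (L(std::to_string(label->id)) above, and
// value->GetIdString() in x64_seq_control.cc further below), so
// FinalizationPass no longer has to arena-allocate "_labelN" name strings.
// GetIdString() itself is not shown in this diff; the shape below is only a
// guess at what it returns, for illustration.
#include <cstdint>
#include <string>

struct HypotheticalLabel {
  uint32_t id;
  // Presumably just the decimal id, matching the string X64Emitter::Emit
  // passes to Xbyak::CodeGenerator::L() when marking block labels.
  std::string GetIdString() const { return std::to_string(id); }
};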
mov(rcx, qword[rsp + StackLayout::GUEST_RET_ADDR]); add(rsp, static_cast(stack_size())); - db(0xFF); - dd(rgc.offset_); + jmp((void*)fn->machine_code(), T_NEAR); } - call_sites_.push_back(rgc); return; - } - - if (fn->machine_code()) { - // TODO(benvanik): is it worth it to do this? It removes the need for - // a ResolveFunction call, but makes the table less useful. - assert_zero(uint64_t(fn->machine_code()) & 0xFFFFFFFF00000000); - // todo: this should be changed so that we can actually do a call to - // fn->machine_code. the code will be emitted near us, so 32 bit rel jmp - // should be possible - mov(eax, uint32_t(uint64_t(fn->machine_code()))); } else if (code_cache_->has_indirection_table()) { // Load the pointer to the indirection table maintained in X64CodeCache. // The target dword will either contain the address of the generated code @@ -1017,7 +936,10 @@ static const vec128_t xmm_consts[] = { /*XMMSTVLShuffle*/ v128_setr_bytes(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), /* XMMSTVRSwapMask*/ - vec128b((uint8_t)0x83)}; + vec128b((uint8_t)0x83), /*XMMVSRShlByteshuf*/ + v128_setr_bytes(13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3, 0x80), + // XMMVSRMask + vec128b(1)}; void* X64Emitter::FindByteConstantOffset(unsigned bytevalue) { for (auto& vec : xmm_consts) { diff --git a/src/xenia/cpu/backend/x64/x64_emitter.h b/src/xenia/cpu/backend/x64/x64_emitter.h index 91f4016c1..d4edf1e2e 100644 --- a/src/xenia/cpu/backend/x64/x64_emitter.h +++ b/src/xenia/cpu/backend/x64/x64_emitter.h @@ -172,7 +172,9 @@ enum XmmConst { XMMLVLShuffle, XMMLVRCmp16, XMMSTVLShuffle, - XMMSTVRSwapMask // swapwordmask with bit 7 set + XMMSTVRSwapMask, // swapwordmask with bit 7 set + XMMVSRShlByteshuf, + XMMVSRMask }; using amdfx::xopcompare_e; @@ -190,13 +192,6 @@ class XbyakAllocator : public Xbyak::Allocator { virtual bool useProtect() const { return false; } }; -class ResolvableGuestCall { - public: - bool is_jump_; - uintptr_t destination_; - // rgcid - unsigned offset_; -}; class X64Emitter; using TailEmitCallback = std::function; struct TailEmitter { @@ -220,7 +215,6 @@ class X64Emitter : public Xbyak::CodeGenerator { uint32_t debug_info_flags, FunctionDebugInfo* debug_info, void** out_code_address, size_t* out_code_size, std::vector* out_source_map); - void InjectCallAddresses(void* new_execute_addr); public: // Reserved: rsp, rsi, rdi @@ -230,7 +224,7 @@ class X64Emitter : public Xbyak::CodeGenerator { // xmm4-xmm15 (save to get xmm3) static const int GPR_COUNT = 7; static const int XMM_COUNT = 12; - + static constexpr size_t kStashOffset = 32; static void SetupReg(const hir::Value* v, Xbyak::Reg8& r) { auto idx = gpr_reg_map_[v->reg.index]; r = Xbyak::Reg8(idx); @@ -410,8 +404,6 @@ class X64Emitter : public Xbyak::CodeGenerator { static const uint32_t gpr_reg_map_[GPR_COUNT]; static const uint32_t xmm_reg_map_[XMM_COUNT]; - uint32_t current_rgc_id_ = 0xEEDDF00F; - std::vector call_sites_; /* set to true if the low 32 bits of membase == 0. only really advantageous if you are storing 32 bit 0 to a displaced address, diff --git a/src/xenia/cpu/backend/x64/x64_seq_control.cc b/src/xenia/cpu/backend/x64/x64_seq_control.cc index 54e7ac8a0..2e2d273cc 100644 --- a/src/xenia/cpu/backend/x64/x64_seq_control.cc +++ b/src/xenia/cpu/backend/x64/x64_seq_control.cc @@ -25,46 +25,46 @@ static void EmitFusedBranch(X64Emitter& e, const T& i) { bool valid = i.instr->prev && i.instr->prev->dest == i.src1.value; auto opcode = valid ? 
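// Illustrative sketch, not part of the patch: with resolve_rel32_guest_calls
// gone, X64Emitter::Call() above emits call((void*)fn->machine_code()) or
// jmp(..., T_NEAR) directly and lets xbyak encode the rel32 displacement that
// InjectCallAddresses() used to patch in after placement. Either way the
// encoding is the same 5-byte form (0xE8 call / 0xE9 jmp followed by a 32-bit
// displacement measured from the next instruction), which is what the removed
// "destination_ - (hunter + 1)" arithmetic computed.
#include <cstdint>

inline int32_t Rel32Displacement(uintptr_t call_site, uintptr_t target) {
  // One opcode byte plus four displacement bytes; the displacement is
  // relative to the first byte after the instruction, i.e. call_site + 5.
  return static_cast<int32_t>(target - (call_site + 5));
}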
i.instr->prev->opcode->num : -1; if (valid) { - auto name = i.src2.value->name; + std::string name = i.src2.value->GetIdString(); switch (opcode) { case OPCODE_COMPARE_EQ: - e.je(name, e.T_NEAR); + e.je(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_NE: - e.jne(name, e.T_NEAR); + e.jne(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_SLT: - e.jl(name, e.T_NEAR); + e.jl(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_SLE: - e.jle(name, e.T_NEAR); + e.jle(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_SGT: - e.jg(name, e.T_NEAR); + e.jg(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_SGE: - e.jge(name, e.T_NEAR); + e.jge(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_ULT: - e.jb(name, e.T_NEAR); + e.jb(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_ULE: - e.jbe(name, e.T_NEAR); + e.jbe(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_UGT: - e.ja(name, e.T_NEAR); + e.ja(std::move(name), e.T_NEAR); break; case OPCODE_COMPARE_UGE: - e.jae(name, e.T_NEAR); + e.jae(std::move(name), e.T_NEAR); break; default: e.test(i.src1, i.src1); - e.jnz(name, e.T_NEAR); + e.jnz(std::move(name), e.T_NEAR); break; } } else { e.test(i.src1, i.src1); - e.jnz(i.src2.value->name, e.T_NEAR); + e.jnz(i.src2.value->GetIdString(), e.T_NEAR); } } // ============================================================================ @@ -490,7 +490,7 @@ EMITTER_OPCODE_TABLE(OPCODE_SET_RETURN_ADDRESS, SET_RETURN_ADDRESS); // ============================================================================ struct BRANCH : Sequence> { static void Emit(X64Emitter& e, const EmitArgType& i) { - e.jmp(i.src1.value->name, e.T_NEAR); + e.jmp(i.src1.value->GetIdString(), e.T_NEAR); } }; EMITTER_OPCODE_TABLE(OPCODE_BRANCH, BRANCH); @@ -534,7 +534,7 @@ struct BRANCH_TRUE_F32 Xmm input = GetInputRegOrConstant(e, i.src1, e.xmm0); e.vmovd(e.eax, input); e.test(e.eax, e.eax); - e.jnz(i.src2.value->name, e.T_NEAR); + e.jnz(i.src2.value->GetIdString(), e.T_NEAR); } }; struct BRANCH_TRUE_F64 @@ -543,7 +543,7 @@ struct BRANCH_TRUE_F64 Xmm input = GetInputRegOrConstant(e, i.src1, e.xmm0); e.vmovq(e.rax, input); e.test(e.rax, e.rax); - e.jnz(i.src2.value->name, e.T_NEAR); + e.jnz(i.src2.value->GetIdString(), e.T_NEAR); } }; EMITTER_OPCODE_TABLE(OPCODE_BRANCH_TRUE, BRANCH_TRUE_I8, BRANCH_TRUE_I16, @@ -557,7 +557,7 @@ struct BRANCH_FALSE_I8 : Sequence> { static void Emit(X64Emitter& e, const EmitArgType& i) { e.test(i.src1, i.src1); - e.jz(i.src2.value->name, e.T_NEAR); + e.jz(i.src2.value->GetIdString(), e.T_NEAR); } }; struct BRANCH_FALSE_I16 @@ -565,7 +565,7 @@ struct BRANCH_FALSE_I16 I> { static void Emit(X64Emitter& e, const EmitArgType& i) { e.test(i.src1, i.src1); - e.jz(i.src2.value->name, e.T_NEAR); + e.jz(i.src2.value->GetIdString(), e.T_NEAR); } }; struct BRANCH_FALSE_I32 @@ -573,7 +573,7 @@ struct BRANCH_FALSE_I32 I> { static void Emit(X64Emitter& e, const EmitArgType& i) { e.test(i.src1, i.src1); - e.jz(i.src2.value->name, e.T_NEAR); + e.jz(i.src2.value->GetIdString(), e.T_NEAR); } }; struct BRANCH_FALSE_I64 @@ -581,7 +581,7 @@ struct BRANCH_FALSE_I64 I> { static void Emit(X64Emitter& e, const EmitArgType& i) { e.test(i.src1, i.src1); - e.jz(i.src2.value->name, e.T_NEAR); + e.jz(i.src2.value->GetIdString(), e.T_NEAR); } }; struct BRANCH_FALSE_F32 @@ -591,7 +591,7 @@ struct BRANCH_FALSE_F32 Xmm input = GetInputRegOrConstant(e, i.src1, e.xmm0); e.vmovd(e.eax, input); e.test(e.eax, e.eax); - e.jz(i.src2.value->name, e.T_NEAR); + e.jz(i.src2.value->GetIdString(), e.T_NEAR); } }; struct 
BRANCH_FALSE_F64 @@ -601,7 +601,7 @@ struct BRANCH_FALSE_F64 Xmm input = GetInputRegOrConstant(e, i.src1, e.xmm0); e.vmovq(e.rax, input); e.test(e.rax, e.rax); - e.jz(i.src2.value->name, e.T_NEAR); + e.jz(i.src2.value->GetIdString(), e.T_NEAR); } }; EMITTER_OPCODE_TABLE(OPCODE_BRANCH_FALSE, BRANCH_FALSE_I8, BRANCH_FALSE_I16, diff --git a/src/xenia/cpu/backend/x64/x64_seq_vector.cc b/src/xenia/cpu/backend/x64/x64_seq_vector.cc index 2b4657c36..5d017ec36 100644 --- a/src/xenia/cpu/backend/x64/x64_seq_vector.cc +++ b/src/xenia/cpu/backend/x64/x64_seq_vector.cc @@ -805,22 +805,7 @@ EMITTER_OPCODE_TABLE(OPCODE_VECTOR_SUB, VECTOR_SUB); // ============================================================================ // OPCODE_VECTOR_SHL // ============================================================================ -template ::value, int> = 0> -static __m128i EmulateVectorShl(void*, __m128i src1, __m128i src2) { - alignas(16) T value[16 / sizeof(T)]; - alignas(16) T shamt[16 / sizeof(T)]; - // Load SSE registers into a C array. - _mm_store_si128(reinterpret_cast<__m128i*>(value), src1); - _mm_store_si128(reinterpret_cast<__m128i*>(shamt), src2); - - for (size_t i = 0; i < (16 / sizeof(T)); ++i) { - value[i] = value[i] << (shamt[i] & ((sizeof(T) * 8) - 1)); - } - - // Store result and return it. - return _mm_load_si128(reinterpret_cast<__m128i*>(value)); -} static XmmConst GetShiftmaskForType(unsigned typ) { if (typ == INT8_TYPE) { return XMMXOPByteShiftMask; @@ -914,28 +899,14 @@ struct VECTOR_SHL_V128 } } if (all_same) { - // mul by two - /*if (seenvalue == 1) { - e.vpaddb(i.dest, i.src1, i.src1); - } else if (seenvalue == 2) { - e.vpaddb(i.dest, i.src1, i.src1); - e.vpaddb(i.dest, i.dest, i.dest); - } else if (seenvalue == 3) { - // mul by 8 - e.vpaddb(i.dest, i.src1, i.src1); - e.vpaddb(i.dest, i.dest, i.dest); - e.vpaddb(i.dest, i.dest, i.dest); - } else*/ - { - e.vpmovzxbw(e.ymm0, i.src1); - e.vpsllw(e.ymm0, e.ymm0, seenvalue); - e.vextracti128(e.xmm1, e.ymm0, 1); + e.vpmovzxbw(e.ymm0, i.src1); + e.vpsllw(e.ymm0, e.ymm0, seenvalue); + e.vextracti128(e.xmm1, e.ymm0, 1); - e.vpshufb(e.xmm0, e.xmm0, e.GetXmmConstPtr(XMMShortsToBytes)); - e.vpshufb(e.xmm1, e.xmm1, e.GetXmmConstPtr(XMMShortsToBytes)); - e.vpunpcklqdq(i.dest, e.xmm0, e.xmm1); - return; - } + e.vpshufb(e.xmm0, e.xmm0, e.GetXmmConstPtr(XMMShortsToBytes)); + e.vpshufb(e.xmm1, e.xmm1, e.GetXmmConstPtr(XMMShortsToBytes)); + e.vpunpcklqdq(i.dest, e.xmm0, e.xmm1); + return; } else { e.LoadConstantXmm(e.xmm2, constmask); @@ -966,14 +937,41 @@ struct VECTOR_SHL_V128 } } } - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShl)); - e.vmovaps(i.dest, e.xmm0); + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + e.movzx(e.ecx, e.byte[e.rsp + stack_offset_src2 + e.rdx]); + + e.shl(e.byte[e.rsp + stack_offset_src1 + e.rdx], e.cl); 
+ + if (e.IsFeatureEnabled(kX64FlagsIndependentVars)) { + e.inc(e.edx); + } else { + e.add(e.edx, 1); + } + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); } static void EmitInt16(X64Emitter& e, const EmitArgType& i) { Xmm src1; @@ -1022,14 +1020,32 @@ struct VECTOR_SHL_V128 // TODO(benvanik): native version (with shift magic). e.L(emu); + + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], src1); if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShl)); - e.vmovaps(i.dest, e.xmm0); + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + e.movzx(e.ecx, e.word[e.rsp + stack_offset_src2 + e.rdx]); + + e.shl(e.word[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 2); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); e.L(end); } @@ -1098,14 +1114,32 @@ struct VECTOR_SHL_V128 // TODO(benvanik): native version (with shift magic). e.L(emu); + + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], src1); if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShl)); - e.vmovaps(i.dest, e.xmm0); + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + e.mov(e.ecx, e.dword[e.rsp + stack_offset_src2 + e.rdx]); + + e.shl(e.dword[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 4); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); e.L(end); } @@ -1116,22 +1150,6 @@ EMITTER_OPCODE_TABLE(OPCODE_VECTOR_SHL, VECTOR_SHL_V128); // ============================================================================ // OPCODE_VECTOR_SHR // ============================================================================ -template ::value, int> = 0> -static __m128i EmulateVectorShr(void*, __m128i src1, __m128i src2) { - alignas(16) T value[16 / sizeof(T)]; - alignas(16) T shamt[16 / sizeof(T)]; - - // Load SSE registers into a C array. - _mm_store_si128(reinterpret_cast<__m128i*>(value), src1); - _mm_store_si128(reinterpret_cast<__m128i*>(shamt), src2); - - for (size_t i = 0; i < (16 / sizeof(T)); ++i) { - value[i] = value[i] >> (shamt[i] & ((sizeof(T) * 8) - 1)); - } - - // Store result and return it. - return _mm_load_si128(reinterpret_cast<__m128i*>(value)); -} struct VECTOR_SHR_V128 : Sequence> { @@ -1179,34 +1197,63 @@ struct VECTOR_SHR_V128 } static void EmitInt8(X64Emitter& e, const EmitArgType& i) { - // TODO(benvanik): native version (with shift magic). 
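// Illustrative sketch, not part of the patch: what the removed
// EmulateVectorShl helper computed, and what the shl loops emitted above (and
// the matching shr/sar loops further on) now do in place on the stack scratch
// area: every lane is shifted by its own shift amount, masked to the lane
// width.
#include <cstddef>
#include <cstdint>

template <typename T>
void EmulateVariableShiftLeft(T (&value)[16 / sizeof(T)],
                              const T (&shamt)[16 / sizeof(T)]) {
  for (size_t i = 0; i < 16 / sizeof(T); ++i) {
    value[i] = static_cast<T>(value[i] << (shamt[i] & (sizeof(T) * 8 - 1)));
  }
}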
- if (i.src2.is_constant) { - if (e.IsFeatureEnabled(kX64EmitGFNI)) { - const auto& shamt = i.src2.constant(); - bool all_same = true; - for (size_t n = 0; n < 16 - n; ++n) { - if (shamt.u8[n] != shamt.u8[n + 1]) { - all_same = false; - break; - } - } - if (all_same) { - // Every count is the same, so we can use gf2p8affineqb. - const uint8_t shift_amount = shamt.u8[0] & 0b111; - const uint64_t shift_matrix = UINT64_C(0x0102040810204080) - << (shift_amount * 8); - e.vgf2p8affineqb(i.dest, i.src1, - e.StashConstantXmm(0, vec128q(shift_matrix)), 0); - return; + if (i.src2.is_constant && e.IsFeatureEnabled(kX64EmitGFNI)) { + const auto& shamt = i.src2.constant(); + bool all_same = true; + for (size_t n = 0; n < 16 - n; ++n) { + if (shamt.u8[n] != shamt.u8[n + 1]) { + all_same = false; + break; } } - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); - } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + if (all_same) { + // Every count is the same, so we can use gf2p8affineqb. + const uint8_t shift_amount = shamt.u8[0] & 0b111; + const uint64_t shift_matrix = UINT64_C(0x0102040810204080) + << (shift_amount * 8); + e.vgf2p8affineqb(i.dest, i.src1, + e.StashConstantXmm(0, vec128q(shift_matrix)), 0); + return; + } } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShr)); - e.vmovaps(i.dest, e.xmm0); + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); + } + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + // movzx is to eliminate any possible dep on previous value of rcx at start + // of loop + e.movzx(e.ecx, e.byte[e.rsp + stack_offset_src2 + e.rdx]); + // maybe using a memory operand as the left side isn't the best idea lol, + // still better than callnativesafe though agners docs have no timing info + // on shx [m], cl so shrug + e.shr(e.byte[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + if (e.IsFeatureEnabled(kX64FlagsIndependentVars)) { + e.inc(e.edx); + } else { + e.add(e.edx, 1); + } + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); } static void EmitInt16(X64Emitter& e, const EmitArgType& i) { @@ -1248,14 +1295,38 @@ struct VECTOR_SHR_V128 // TODO(benvanik): native version (with shift magic). 
e.L(emu); - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShr)); - e.vmovaps(i.dest, e.xmm0); + + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + e.movzx(e.ecx, e.word[e.rsp + stack_offset_src2 + e.rdx]); + + e.shr(e.word[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 2); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); e.L(end); } @@ -1324,14 +1395,37 @@ struct VECTOR_SHR_V128 // TODO(benvanik): native version. e.L(emu); - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShr)); - e.vmovaps(i.dest, e.xmm0); + + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + e.mov(e.ecx, e.dword[e.rsp + stack_offset_src2 + e.rdx]); + e.shr(e.dword[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 4); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); e.L(end); } @@ -1388,7 +1482,8 @@ struct VECTOR_SHA_V128 } static void EmitInt8(X64Emitter& e, const EmitArgType& i) { - // TODO(benvanik): native version (with shift magic). + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; if (i.src2.is_constant) { const auto& shamt = i.src2.constant(); bool all_same = true; @@ -1399,7 +1494,6 @@ struct VECTOR_SHA_V128 } } - if (e.IsFeatureEnabled(kX64EmitGFNI)) { if (all_same) { // Every count is the same, so we can use gf2p8affineqb. 
@@ -1412,8 +1506,7 @@ struct VECTOR_SHA_V128 e.StashConstantXmm(0, vec128q(shift_matrix)), 0); return; } - } - else if (all_same) { + } else if (all_same) { Xmm to_be_shifted = GetInputRegOrConstant(e, i.src1, e.xmm1); e.vpmovsxbw(e.xmm0, to_be_shifted); //_mm_srai_epi16 / psraw @@ -1425,14 +1518,41 @@ struct VECTOR_SHA_V128 return; } - - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShr)); - e.vmovaps(i.dest, e.xmm0); + + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); + } + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + // movzx is to eliminate any possible dep on previous value of rcx at start + // of loop + e.movzx(e.ecx, e.byte[e.rsp + stack_offset_src2 + e.rdx]); + // maybe using a memory operand as the left side isn't the best idea lol, + // still better than callnativesafe though agners docs have no timing info + // on shx [m], cl so shrug + e.sar(e.byte[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + if (e.IsFeatureEnabled(kX64FlagsIndependentVars)) { + e.inc(e.edx); + } else { + e.add(e.edx, 1); + } + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); } static void EmitInt16(X64Emitter& e, const EmitArgType& i) { @@ -1474,14 +1594,38 @@ struct VECTOR_SHA_V128 // TODO(benvanik): native version (with shift magic). e.L(emu); - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShr)); - e.vmovaps(i.dest, e.xmm0); + + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + e.movzx(e.ecx, e.word[e.rsp + stack_offset_src2 + e.rdx]); + + e.sar(e.word[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 2); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); e.L(end); } @@ -1508,9 +1652,9 @@ struct VECTOR_SHA_V128 // that happens so we mask. if (i.src2.is_constant) { e.LoadConstantXmm(e.xmm0, i.src2.constant()); - e.vandps(e.xmm0, e.GetXmmConstPtr(XMMShiftMaskPS)); + e.vpand(e.xmm0, e.GetXmmConstPtr(XMMShiftMaskPS)); } else { - e.vandps(e.xmm0, i.src2, e.GetXmmConstPtr(XMMShiftMaskPS)); + e.vpand(e.xmm0, i.src2, e.GetXmmConstPtr(XMMShiftMaskPS)); } e.vpsravd(i.dest, i.src1, e.xmm0); } else { @@ -1535,14 +1679,36 @@ struct VECTOR_SHA_V128 // TODO(benvanik): native version. 
e.L(emu); - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), e.StashConstantXmm(1, i.src2.constant())); + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe(reinterpret_cast(EmulateVectorShr)); - e.vmovaps(i.dest, e.xmm0); + + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + e.mov(e.ecx, e.dword[e.rsp + stack_offset_src2 + e.rdx]); + e.sar(e.dword[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 4); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); e.L(end); } @@ -1550,26 +1716,6 @@ struct VECTOR_SHA_V128 }; EMITTER_OPCODE_TABLE(OPCODE_VECTOR_SHA, VECTOR_SHA_V128); -// ============================================================================ -// OPCODE_VECTOR_ROTATE_LEFT -// ============================================================================ -template ::value, int> = 0> -static __m128i EmulateVectorRotateLeft(void*, __m128i src1, __m128i src2) { - alignas(16) T value[16 / sizeof(T)]; - alignas(16) T shamt[16 / sizeof(T)]; - - // Load SSE registers into a C array. - _mm_store_si128(reinterpret_cast<__m128i*>(value), src1); - _mm_store_si128(reinterpret_cast<__m128i*>(shamt), src2); - - for (size_t i = 0; i < (16 / sizeof(T)); ++i) { - value[i] = xe::rotate_left(value[i], shamt[i] & ((sizeof(T) * 8) - 1)); - } - - // Store result and return it. - return _mm_load_si128(reinterpret_cast<__m128i*>(value)); -} - struct VECTOR_ROTATE_LEFT_V128 : Sequence> { @@ -1594,33 +1740,72 @@ struct VECTOR_ROTATE_LEFT_V128 } } else { + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; switch (i.instr->flags) { - case INT8_TYPE: - // TODO(benvanik): native version (with shift magic). - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), - e.StashConstantXmm(1, i.src2.constant())); + case INT8_TYPE: { + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe( - reinterpret_cast(EmulateVectorRotateLeft)); - e.vmovaps(i.dest, e.xmm0); - break; - case INT16_TYPE: - // TODO(benvanik): native version (with shift magic). 
+ if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), - e.StashConstantXmm(1, i.src2.constant())); + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe( - reinterpret_cast(EmulateVectorRotateLeft)); - e.vmovaps(i.dest, e.xmm0); - break; + + Xbyak::Label rotate_iter; + + e.xor_(e.edx, e.edx); + + e.L(rotate_iter); + e.movzx(e.ecx, e.byte[e.rsp + stack_offset_src2 + e.rdx]); + + e.rol(e.byte[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 1); + + e.cmp(e.edx, 16); + e.jnz(rotate_iter); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); + + } break; + case INT16_TYPE: { + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); + } + + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label rotate_iter; + + e.xor_(e.edx, e.edx); + + e.L(rotate_iter); + e.movzx(e.ecx, e.word[e.rsp + stack_offset_src2 + e.rdx]); + e.rol(e.word[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 2); + + e.cmp(e.edx, 16); + e.jnz(rotate_iter); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); + + } break; case INT32_TYPE: { if (e.IsFeatureEnabled(kX64EmitAVX512Ortho)) { e.vprolvd(i.dest, i.src1, i.src2); @@ -1638,23 +1823,40 @@ struct VECTOR_ROTATE_LEFT_V128 } e.vpsllvd(e.xmm1, i.src1, e.xmm0); // Shift right (to get low bits): - e.vmovaps(temp, e.GetXmmConstPtr(XMMPI32)); + e.vmovdqa(temp, e.GetXmmConstPtr(XMMPI32)); e.vpsubd(temp, e.xmm0); e.vpsrlvd(i.dest, i.src1, temp); // Merge: e.vpor(i.dest, e.xmm1); } else { - // TODO(benvanik): non-AVX2 native version. 
- if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), - e.StashConstantXmm(1, i.src2.constant())); + if (i.src1.is_constant) { + e.StashConstantXmm(0, i.src1.constant()); + stack_offset_src1 = X64Emitter::kStashOffset; + } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe( - reinterpret_cast(EmulateVectorRotateLeft)); - e.vmovaps(i.dest, e.xmm0); + + if (i.src2.is_constant) { + e.StashConstantXmm(1, i.src2.constant()); + stack_offset_src2 = X64Emitter::kStashOffset + 16; + } else { + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], i.src2); + } + + Xbyak::Label rotate_iter; + + e.xor_(e.edx, e.edx); + + e.L(rotate_iter); + e.mov(e.ecx, e.dword[e.rsp + stack_offset_src2 + e.rdx]); + e.rol(e.dword[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + e.add(e.edx, 4); + + e.cmp(e.edx, 16); + e.jnz(rotate_iter); + e.vmovdqa(i.dest, e.byte[e.rsp + stack_offset_src1]); } break; } @@ -1667,80 +1869,120 @@ struct VECTOR_ROTATE_LEFT_V128 }; EMITTER_OPCODE_TABLE(OPCODE_VECTOR_ROTATE_LEFT, VECTOR_ROTATE_LEFT_V128); -// ============================================================================ -// OPCODE_VECTOR_AVERAGE -// ============================================================================ -template ::value, int> = 0> -static __m128i EmulateVectorAverage(void*, __m128i src1, __m128i src2) { - alignas(16) T src1v[16 / sizeof(T)]; - alignas(16) T src2v[16 / sizeof(T)]; - alignas(16) T value[16 / sizeof(T)]; - - // Load SSE registers into a C array. - _mm_store_si128(reinterpret_cast<__m128i*>(src1v), src1); - _mm_store_si128(reinterpret_cast<__m128i*>(src2v), src2); - - for (size_t i = 0; i < (16 / sizeof(T)); ++i) { - auto t = (uint64_t(src1v[i]) + uint64_t(src2v[i]) + 1) / 2; - value[i] = T(t); - } - - // Store result and return it. 
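// Illustrative sketch, not part of the patch: the per-lane rotate that the
// rol loops above perform for INT8/INT16 (and the non-AVX-512 INT32 fallback),
// equivalent to the removed EmulateVectorRotateLeft helper.
#include <cstdint>

template <typename T>
T RotateLeftLane(T value, unsigned amount) {
  const unsigned bits = sizeof(T) * 8;
  amount &= bits - 1;
  if (amount == 0) {
    return value;  // avoid the undefined value >> bits case below
  }
  return static_cast<T>((value << amount) | (value >> (bits - amount)));
}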
- return _mm_load_si128(reinterpret_cast<__m128i*>(value)); -} - struct VECTOR_AVERAGE : Sequence> { static void Emit(X64Emitter& e, const EmitArgType& i) { + auto i_flags = i.instr->flags; EmitCommutativeBinaryXmmOp( e, i, - [&i](X64Emitter& e, const Xmm& dest, const Xmm& src1, const Xmm& src2) { - const TypeName part_type = - static_cast(i.instr->flags & 0xFF); - const uint32_t arithmetic_flags = i.instr->flags >> 8; + [i_flags](X64Emitter& e, const Xmm& dest, const Xmm& src1, + const Xmm& src2) { + const TypeName part_type = static_cast(i_flags & 0xFF); + const uint32_t arithmetic_flags = i_flags >> 8; bool is_unsigned = !!(arithmetic_flags & ARITHMETIC_UNSIGNED); + unsigned stack_offset_src1 = StackLayout::GUEST_SCRATCH; + unsigned stack_offset_src2 = StackLayout::GUEST_SCRATCH + 16; switch (part_type) { case INT8_TYPE: if (is_unsigned) { e.vpavgb(dest, src1, src2); } else { - assert_always(); + // todo: avx2 version or version that sign extends to two __m128 + + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], src1); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], src2); + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + + e.movsx(e.ecx, e.byte[e.rsp + stack_offset_src2 + e.rdx]); + e.movsx(e.eax, e.byte[e.rsp + stack_offset_src1 + e.rdx]); + + e.lea(e.ecx, e.ptr[e.ecx + e.eax + 1]); + e.sar(e.ecx, 1); + e.mov(e.byte[e.rsp + stack_offset_src1 + e.rdx], e.cl); + + if (e.IsFeatureEnabled(kX64FlagsIndependentVars)) { + e.inc(e.edx); + } else { + e.add(e.edx, 1); + } + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(dest, e.ptr[e.rsp + stack_offset_src1]); } break; case INT16_TYPE: if (is_unsigned) { e.vpavgw(dest, src1, src2); } else { - assert_always(); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], src1); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], src2); + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + + e.movsx(e.ecx, e.word[e.rsp + stack_offset_src2 + e.rdx]); + e.movsx(e.eax, e.word[e.rsp + stack_offset_src1 + e.rdx]); + + e.lea(e.ecx, e.ptr[e.ecx + e.eax + 1]); + e.sar(e.ecx, 1); + e.mov(e.word[e.rsp + stack_offset_src1 + e.rdx], e.cx); + + e.add(e.edx, 2); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(dest, e.ptr[e.rsp + stack_offset_src1]); } break; - case INT32_TYPE: + case INT32_TYPE: { // No 32bit averages in AVX. 
+ e.vmovdqa(e.ptr[e.rsp + stack_offset_src1], src1); + e.vmovdqa(e.ptr[e.rsp + stack_offset_src2], src2); + + Xbyak::Label looper; + + e.xor_(e.edx, e.edx); + + e.L(looper); + auto src2_current_ptr = + e.dword[e.rsp + stack_offset_src2 + e.rdx]; + auto src1_current_ptr = + e.dword[e.rsp + stack_offset_src1 + e.rdx]; + if (is_unsigned) { - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), - e.StashConstantXmm(1, i.src2.constant())); - } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); - } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe( - reinterpret_cast(EmulateVectorAverage)); - e.vmovaps(i.dest, e.xmm0); + // implicit zero-ext + e.mov(e.ecx, src2_current_ptr); + e.mov(e.eax, src1_current_ptr); } else { - if (i.src2.is_constant) { - e.lea(e.GetNativeParam(1), - e.StashConstantXmm(1, i.src2.constant())); - } else { - e.lea(e.GetNativeParam(1), e.StashXmm(1, i.src2)); - } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe( - reinterpret_cast(EmulateVectorAverage)); - e.vmovaps(i.dest, e.xmm0); + e.movsxd(e.rcx, src2_current_ptr); + e.movsxd(e.rax, src1_current_ptr); } - break; + + e.lea(e.rcx, e.ptr[e.rcx + e.rax + 1]); + if (is_unsigned) { + e.shr(e.rcx, 1); + } else { + e.sar(e.rcx, 1); + } + e.mov(e.dword[e.rsp + stack_offset_src1 + e.rdx], e.ecx); + + e.add(e.edx, 4); + + e.cmp(e.edx, 16); + e.jnz(looper); + e.vmovdqa(dest, e.ptr[e.rsp + stack_offset_src1]); + } break; + default: assert_unhandled_case(part_type); break; diff --git a/src/xenia/cpu/backend/x64/x64_sequences.cc b/src/xenia/cpu/backend/x64/x64_sequences.cc index 1b6c40f44..24c10ebef 100644 --- a/src/xenia/cpu/backend/x64/x64_sequences.cc +++ b/src/xenia/cpu/backend/x64/x64_sequences.cc @@ -54,6 +54,10 @@ DEFINE_bool(inline_loadclock, false, "Directly read cached guest clock without calling the LoadClock " "method (it gets repeatedly updated by calls from other threads)", "CPU"); +DEFINE_bool(delay_via_maybeyield, false, + "implement the db16cyc instruction via MaybeYield, may improve " + "scheduling of guest threads", + "x64"); namespace xe { namespace cpu { namespace backend { @@ -1065,7 +1069,11 @@ struct COMPARE_NE_I32 e.cmp(src1, src2); }, [](X64Emitter& e, const Reg32& src1, int32_t constant) { - e.cmp(src1, constant); + if (constant == 0 && e.CanUseMembaseLow32As0()) { + e.cmp(src1, e.GetMembaseReg().cvt32()); + } else { + e.cmp(src1, constant); + } }); } CompareNeDoSetne(e, i.instr, i.dest); @@ -2603,25 +2611,16 @@ void EmitAndNotXX(X64Emitter& e, const ARGS& i) { // src1 constant. // `and` instruction only supports up to 32-bit immediate constants // 64-bit constants will need a temp register - if (i.dest.reg().getBit() == 64) { - auto temp = GetTempReg(e); - e.mov(temp, i.src1.constant()); + //only possible with 64 bit inputs, andc is the only instruction that generates this + auto temp = GetTempReg(e); + e.mov(temp, i.src1.constant()); - if (e.IsFeatureEnabled(kX64EmitBMI1)) { - if (i.dest.reg().getBit() == 64) { - e.andn(i.dest.reg().cvt64(), i.src2.reg().cvt64(), temp.cvt64()); - } else { - e.andn(i.dest.reg().cvt32(), i.src2.reg().cvt32(), temp.cvt32()); - } - } else { - e.mov(i.dest, i.src2); - e.not_(i.dest); - e.and_(i.dest, temp); - } + if (e.IsFeatureEnabled(kX64EmitBMI1)) { + e.andn(i.dest.reg().cvt64(), i.src2.reg().cvt64(), temp.cvt64()); } else { e.mov(i.dest, i.src2); e.not_(i.dest); - e.and_(i.dest, uint32_t(i.src1.constant())); + e.and_(i.dest, temp); } } else if (i.src2.is_constant) { // src2 constant. 
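// Illustrative sketch, not part of the patch: the rounding average that the
// new signed VECTOR_AVERAGE paths above emit with movsx + lea + sar (the
// unsigned byte/word cases keep using vpavgb/vpavgw, which round the same
// way): avg = (a + b + 1) >> 1, evaluated in a wider type so the +1 cannot
// overflow.
#include <cstdint>

inline int8_t SignedAverageRound(int8_t a, int8_t b) {
  const int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b) + 1;
  return static_cast<int8_t>(sum >> 1);  // arithmetic shift, like sar
}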
@@ -2638,13 +2637,7 @@ void EmitAndNotXX(X64Emitter& e, const ARGS& i) { } else { // neither are constant if (e.IsFeatureEnabled(kX64EmitBMI1)) { - if (i.dest.reg().getBit() == 64) { - e.andn(i.dest.reg().cvt64(), i.src2.reg().cvt64(), - i.src1.reg().cvt64()); - } else { - e.andn(i.dest.reg().cvt32(), i.src2.reg().cvt32(), - i.src1.reg().cvt32()); - } + e.andn(i.dest.reg().cvt64(), i.src2.reg().cvt64(), i.src1.reg().cvt64()); } else { if (i.dest == i.src2) { e.not_(i.dest); @@ -2982,15 +2975,52 @@ struct SHR_I64 : Sequence> { }; struct SHR_V128 : Sequence> { static void Emit(X64Emitter& e, const EmitArgType& i) { - // TODO(benvanik): native version (with shift magic). - if (i.src2.is_constant) { - e.mov(e.GetNativeParam(1), i.src2.constant()); + /* + godbolt link: + https://godbolt.org/#z:OYLghAFBqd5QCxAYwPYBMCmBRdBLAF1QCcAaPECAMzwBtMA7AQwFtMQByARg9KtQYEAysib0QXACx8BBAKoBnTAAUAHpwAMvAFYTStJg1DIApACYAQuYukl9ZATwDKjdAGFUtAK4sGIMwAcpK4AMngMmAByPgBGmMQgAGwaAJykAA6oCoRODB7evv5BmdmOAmER0SxxCclpdpgOuUIETMQE%2BT5%2BgbaY9mUMLW0EFVGx8Umptq3tnYU9CjMj4WPVE3UAlLaoXsTI7BzmAMzhyN5YANQmR26qAYnhBMThAHQI19gmGgCCx6fnmCuNxihA%2BX1%2BZhODDOXku1zci3wgjeYJ%2B4L%2BVAuGnRkLwmKwNAi6AgAH0SQBxSJyNxkjbgi4XAgAT3SmAJDI5nIutAEwG5vO5tGuVh%2BDOZrPZXgY2WARP5RnlfK8tCFRxF3wZxwJKwuZMeiUkisV9KukO1EV1JMeRzMF0eJq1mEJgL1gi4iQuCgQJAIDrNTp1roIAQZyAQbT9R3NgIAst8ANLYEIhCAMHwbC5plimo7HC7JyPRi4AMRjABUSQbTWYVeYzDijn08Rdo8SSTGhDSAGrYABKdNFjJZbKdXK5QartbVJvFI8xUplconhuVqvVmv9zouccTydT6czPhzebwBsLAYtpYrVbrAEkz2Z62jIU38Re2RdSSSLAB5Xshb5IgAERpEkBw1IcJVHMcOWXQVhRnYdJWlPBZQ/ODVwQwdHS3HckxTLMMyzY9ITtM9sM3HUr0rQ06xCOsGz6JRI3iYgSGrKUAGsGFQAB3BgLjQFh0joeIGOfRsGHwKhwVnZDFw/R4Li8e1px%2BOTRwXVC5TDNplN04gsO%2BDT5xQtD0E9b12mUr0fSMkzlLMuUeQVZVeSM2SkOgmDBPDYgOUeAJ7K8zEGQUiyDI5bJBCCtTjJCxzwt8vSGRUmLgqg0KfNs6y7TdRIMrnKLtI/HKCDCx53UK%2BSSossrUsqgq4ocny8vKgLBBtarvKSpTis6%2BtmoSrTzLazk0oILqhsywVWq5fVJG6zEVTmzlooIM9pqK1dVoawRNvVcEAHojouZRhjwMRaCZFt3ws2cFBeC4ywQTAbraQEvCUCzeNegSCFe26hJE%2Bh/PQVBMAUTNUHK7i%2BOO07DCZAHwj5JgYh2cqAcBWcLkwVR9nScrCCh7IAC9MBeBsi2/ABNMtsD24NqffXUAHU/yApmqokmmgI53suYmrredZkkAEUBaFhaG2bMAwFbUkO27PtwJwwMQh/SJyU17XLUqwJGKkvF0R%2BE6LkiAQAFpFkMdA2gsjHPEwQxIMhp6Xrei4Ppsj9fsYRlAawYHRP80QGB48qvswBHA8BW2pId6snaFR83YuOJRGji5UExbHPTwCmLhYPBFhYJgCDDDOvCxwGSmyGJ6FjgA3MQvEh73iEBARrqxb2pIuLgnqETBATEBRUAuMAOF/H8Qmn9O4h5XiqfUhLAt1WeQi4Ja2vdTefznwb1Qc61bW/Q%2BQkWrb2QWg%2B59iw6JLxKTRxJNnb2An82aEElPJmjeFh6afBvqORqFwpa7zPhcfmnMoEDXzFrck8Dypb2FDBc2Xh0isj2EwJQFwt52ihl9LwV0bqGhiMjSGRtpL/yKnfSWcC4oYlfpiMkyB0jeAUJwr6dDb6CAzqgTw6Cxzm14oCXihgsaT2zinPKOddgXDcBcdIbFgDEFYAoGhJszanSEKgNggkBDN0YHgRg%2Bxi5MGQGxKGRBLGcUBOkC6YhvbIH2AoJQUMGB4H2IZUWW4AJCArJ/ICEBVAZGGCSWcGYOQQHJpgXOYSNhHXiYkpx7QonDgzFbQeatcRvmdG2OmDMSSc1VqaAqZgPRkiASUspvYgRAWuFzGpt5yQkmwMBW8gEGwMiLJrNmJIQlhIiRk6JHJAnBOAiM9JBBMmsjyUcPprMAASbSVlDOmeE2Z8zMAxOxBJJiMcJLLK3Gs8kGzhnbMieM/M3wgmbNCdcsZWTem3QCd/R5MyblZI5AciEklaH%2BJ1LU7ADARmZhiZ%2BAAVFAYp2BoV0iqUk6wDANiLKLFLcF4TIWxNhaSKWiLzCJBRZYNFGLWawMFti0guKYVwqpUBIlyLVBIosOS02AL%2Bk/lBUkhkoKaUDK%2BeE%2BF6KWYfKlnyiBnNBWfKuaQd%2BnMxXAotJrRlfLGWysGfKkkjLlVctWbeXlrL%2BXAJpecy5WyFWgv1erC0azJUmuldSkZFrhUKqlrayi9rbzqpNZq116z3W6s9RSrcoKuBSoIWaiFuTWrm0oQQQEXBPxoClI4BUVA2LZg0GGkFwCzBRoFbGsweaLSgqOEWmNOKLhHDLYCUFkgq0MxpQySQ9bo0MwAKzNrBbGrtHbQUkqdZ2vtNbEiDuAQAdl7a2i4U7J0MwCLO2NARF3YBSCumtKR11cA0FK4tOK927sjU6w9tKuBcF3YWs91aL2lvFfmhmXBK23pbRCl9u6m1vrHRe9tj7y3AK4D2n9rbgMdqlqeqFWLY1XoA4CKWN7oMypLVC0Rp0UbEB%2BiQCyuc445xiNoRoBBaUjSJPB51QFX3IZdTWutFGpbfpo0BOd/6VUIc5iB5jc6B0Mc5sO7jsaJ18cFjOkdMGa0Ls5ebHivEC6jXLtYrIn584KFYICGINcLi8UIAgeTAl8ZJpQgIDtQhz10vpRAQKzKBOoq9VGVmQgJO0rRXiqAjUbOkvZ
fZosQgA04tc5Zs%2BnnWV2bVuxi4QhNbGpiWZu9Qr5WBR845gZnMpVOZQ%2BEhLVrGrJa3FFn8fqMx%2Bec9lp55ABp5Z1EINZMWGRxffeEt1iWYpVYtDV28jrYvOeazl/KbXAQdaK5F/zpBevlbPgNyLEao0Nd/QyODEW5tIY5HNudD6lsVtm%2BZ2tpnG3bbvW2vbwCuOrZ27xzbwCBNncOxcYTl2GZiahWt2NUmHvYGXSOl7Na10Ubm5ur7O2d1/Yjfup132L25pB0BqD9XzOXuO8%2Blb03DtcA2wa/LEbqNw9R/R97Uh0vw7Yxj6rEbTso8axei7JP2uQdm85hbpnEP08y7Si46O7WDaltj%2BrDPdt/cYyz2jbPiec8i1Lcn4vWcMmp2LjLgtru8%2Bl3dpnnMnurb52934uiLjkkYPECuY8VFMDwP5PDqAcF20epFz0rQpJQ34P5ae4Vp4UbJEIZQ3xby9ndGSCACBUIIFpcvGJUArP9YZP7wPGZ4TwgZGuq4U7lEQAmgniAIeO3u8997m0fuA/ACD/yXiof3OVcj/nhAMebhx/dDHpPn4Jq1/T3xKbWeve9gNHnwPweW%2BR9Lxtdt5fo9AjcHHm0dfk/C1Lc34vmeSQe/b2jgIXeC89%2BL5%2BfvS%2BMxR4L1X0fNw7uD5MPXlPC0Ngz9bySbPPvEgr8LyH2JUBG8Ts/BXvfceLgJ%2BP5PpLn4M9u6v3b1zxJB33v17z71PzL1APfwP1r0Tx/36wvzn2v07xAIrzXyhTDwmgNG3zfxHzH1LXgIb0myQIAOvyXzvwwIgMb0CHPzwNjwPxwKIMgIH3P3/zRB%2BC%2BlRmN2QAcXQCiXxhJDQBwwUCiUaUSlqjag8h%2BEWGIC8AcAuBMWQDMDEOP3XA5CoB5ArguBxSZA8inSaWYWfkxB3h%2BHBi8EbkBGDgMXSC/Dvi7gUGVBIxbB2EsO9l%2BzRCnXUKUmbmPguHNmIBSBNCDH3mblzDVH8NOmIEvRNB8OvgsEiIuGICCkHB8K7XQQCL3WCKtHykUKagSMyNMIgnMLcJiCsU4iEPDHCAyNOhMC7QsG4WsA0HeC7S5jqIsCtj3RaKaUHBKPoEUMfkSPaMaMsDMGaLqLaPqOsC6ImM5QZGbhDGaXcKMgZGbAgHcMaSWI0BeA0AuHAk1C8JNHmNtC2JWMTx6IgiOQdEOMHHmKWSWIdTSyYF%2Bzik5DWM/EeMFggGeJjyqSxFUCnWLGLFzU2KOC5n%2BTHGJWJQ32blojBIuDWXVR%2BNpWbi7XELVUlWRI%2BN9UxK/z%2BI0FUCBKJIzHli2In2/0QSRLXQzH2I5DUKOI5F8PEM6I0DMB3lePmkxHWIgBmx%2BIqX%2BOPVBPBL2IZIOPULHHBlFLpJuIgh8lhIuGhSWOPilM5ERMlQWKry5lhLOJ8neNRJHz7lpJ8npNuNanlO/yWK4C8B1NajVLSw1PEO1I5ONIMJVMZPuPhM%2BNCQ1JtLHH1MVPhOVNNLHCtitl8N9OlIuJ8l%2BlEk/E/BmwdOhIJMFOaS2JFOdK5AxPtK/3hNRIjOPyjK5Gbg9CWLCP5IJKBOwGLAjK9IgETNzPyKlPeOeINO2N2KNK5FrPrK1JLPrwJICA0EHIjLKN4MqJNwElLMfilNrJHIqN0nCE1IRM62zN%2BI9H7MHOLCIIJKOGLGwGxAzILIZKuNNJNNlI5FnLHPCHEOeJrOXK%2BIvPnNcweLvNCT5KTLuA3K3NUB3L3IjKZKWKfycnQhyIICb1rLfPxIBKBJBLBCOEZkHxyT3UfBtMPNNJbKWIfKqIYDONQoglhRDU5gVI2AcKcMdKDIgi7gIF2AEhvOYVdMOWNhkh%2BD6kcJiBJACDMHqMSBSCOH3RCI9GhSYC4FpSUiYDMBEoESYCOAkvKiYGkDGiYC7Rku9kSGUqYC11PNWIEQWJqKSKyNSIERItoF9AiICNzAMvKmbiyNMqiPMogh8JiJsqSKOCKK0ssrR10uIF4tiO0pfU8rMCCIssUKkH8pSO1wggWOvJ3nqPrFaLOPeO%2BJOIP0TJPykoNKEvaNzFaIn0/DkvSuiosEWmyuYMUqBEZgyvqPSOKopO%2BJLLgu9gKoKmqtSqnTKoavaKnQmJpOuPFLtC5O9iSuUUisQpFODIuGABhkngAgsCTBJACptKst2KWIqosCysMN6ubI9KGr3QuBGo7IZAmrsWmtmvmozKspTWWoKqKvWoZObLhO2pTT2qlMOqmu%2BBmpCDmqCLOrRyivaKqputNObLRI%2BGCpOMsEHlGrPIOsmruXes%2BoWpfV%2Bq4tmIzObLqsZisvuPBpTX2vGphuOo%2BtOt6qsrhK5hWs6ritRv6vUrapJtNASJxuevxrepOq%2Bo2upqSoxpDFxrEsdKnMBupo9Ixp2p5o9LJoKrWptLutpsvUhrHDysuvaOuqlupuBuSp%2Bp5rVvFr%2BpRvZrjKYDqu2qWR5rqu1uRspr1vWJprjzpp5tasVvqIpoBrPIStKpyTEozGhVyo9HdrrQVNytavdoQs/DppyXOs9pDsRrDsFK9rrJ%2BrDpzMZltA7NPIJs%2BpAClKMvKlOIPPorPOPLPM0q5BWtiraPhLEojKzvEPZN6oZBWrWuvPCNrvavqOuuvPiKlJWv%2BuvPSIzLrsaomOvMbLGvJsHuWs6tzsLNwoZALrFIZKrstJwrzoZEouoqSMhicP0IuPBDkQUA0R0xTyAoskwBiBNSLs1E4KPsUPEJPtUH5KgAuur1gOyVySMjkVPsdKnjABzvVDIw/GiK4AsiWNvrKpBuTJkjilXuIAEg/uhIAfQC3uk0VMvqShcj5DQaSNoB0wYBYEPtQYFAwfCHSBrlIyvrYmcL/osnLgUE4jr16soaNAwbOBdi7iAfhMAt6kEA2CIZrjpCMLPOYY%2BiAaTyWMEdYYYLcCmifnCs5CgYEjJE0zoAzRJHIcN3oCoAHwgDEbZFpXIaRUSGoc4kQY4O%2BAYaUiTUWEjQYYwcIYYGIYIDofBDkcwewdwZ4ecOTKYAvT4cOjzuhSOk5TMYEXBkICujwckKUhxQMi9teiZE03QHQCZEcdNLkTDEaE4kIHEKoHHhjiprCQNJ3UT2btDFel4MyaWKeHbgjIZBOmcaJoZOno5GhVificSfELSbKZMukbeKoC0dKYyYcaKbdIUZUmMvCBJClC7isXDEsIgB8YaeXo5Gcd4rZvRDzttkcGQAuE0NQG0PDFoCoDMB2YrnCdGh0OSbPOcZJBYBYCEObgIA8RJCoBtFJBubuYIHSAQBJHSAUFedueQHueyFz2yCXw4Yia4dZTFToouI2Z8QkNGnqmOYIDMH2d6aRe2YufnswCougdtwrh8SELwRMrcAYbKg%2BD%2BfeZBcCBJCBcLWuf%2BfuZ%2Be%2BYQApaUDmQ8WoC2F1BjBjBJGLF7B/DkGAhJEiB/A6QAA1lEKT2xeX%2BXBXhWywxWAAtPsH8dFeZzwi49F1
QRgAQEkVFo5rQggU5uUc5oZkx8aK%2BhQWUcQu%2B98gcoc/hy1pKfGdIcQvlaEiAFy/A2AqHKmz8eWV1i5nyZxxEEAEAEEOZUQRYeEJF8l61icp%2B5RWvFCvO26gRZAYSQxm%2B1QN11M9hlyiRlMnc3alNEALEG0k6V13agCzN9IbNz19k/fNwY9DVs8hh9etAaURwKipNNhrmO%2BnJLRrNvBTie%2Bpt%2BEVt6Qs89FlgJwvAThMxfyJY2F5AQlmNm4ONuCqAOthtv4wgmqiAAAP13dHdXNyTbbmI0KNcEnuaiWEnEINaRYgC7i7cWEIBrjZDbdkexbXoBbmQIAfa9rnbGcXbEn4bWe3pfEYs5QxArZMfpDJBYCXx0N1YYH1bEEOefaQ6XzwDNfPv8OQaho5DfbtweYZP8YZJw8CDw9RZJFdYEEYDmQbfhPpZpexa4Ho5EndC9a4DkB9eTb9d6uo4CDw8XFEJY5aTebZc48wG45JXXMHLkG/dgmudw7xlzcY8EBJGDi7jYG08k91Gk44647wB48fubcHiE6o7U5o%2B2aNfo804iG09OD06Y%2BudHfELY5k9M5eb44E/Z3oyddU%2BQ7s4pjYi7j5CWO8%2BxfC9QBpbM8CDmZtJE7w9dahmi7ebtgS9BZxTo4Y%2Bc%2BY7PZS9s9E9uh4mIHLloAk888y/%2BeEkwAAEdTOeP0vaU4vIvL2LWQv1OsAKuquauaGSQbQvO3mpQnFeCeQWuSU%2BuSABuPOaHaVZvKvx4FvaGajmwKyiTaVM4DFAQtuiSiSnoIgRwLJHcM56ApJm5uRDc/EZGOQq3c2hTa0LOa8ISeu7Ou5GuVIHDHO5kNEjBMA1vRv6vWRmu5OzOSU2uLh8unP3PDGVOGRUuNP0gdPGg3ODPR2Pw6uSRsuqXl0vufvIY/uVHDBgAgfDHaUCv3PdPMB9OiuaHEfLRQuyudMAZHPMhCvcf0BtAvo%2B2QeaWLDpuIVXW0frE6f4esf0AmeROjEGATF2g2QSQ2fvQa4SeAfyfufefFgBf/3PnvnfmVeOetO5kmAee%2Bev3p3ORkfxOBe8fEvPtaVxO1uuuzybftIFARucfxvyipuIeeOOvMBgAnePefHIjy4HELhtfypxOr2kfSu8PCfTdifDNSfAevfWOxu7Hff4v/eSVA/g/17vvk/RDU%2BNeKfiuMzkey%2ByegekWcjxf6e7epIcukuk/fua/0/xL7Pdm5lqeXPoQMeGf1vgv4%2BWe8Ok1iBnhLCVHMBHh4hVFsWhDjF4h%2Becfo2PnRD8etGV/Ff%2BCVfMZ1fa%2Bte%2Bew%2BTpvRe4vRR5iZBA%2BhaBS5GB8E8Abm2RLok1L%2BvBgByfFgPwUYoZtC2OcfZnup0n7T96ATzXPP3zmQ0BVAWCO3gk186FpQBeAGfl3Hn7EBF%2BUbXfn2yp6qB/ux/evq5wl5cNp2NncfpYiwBo9%2BuW/WqCSDi4dwcelhK7qZ2XTICZ%2BzzFQlAKeZ4BYB6QEPrKE97d9lu83QxsNwfSj916OLeRm8w3748DeEAcuJQOEE0DzIdA%2BIBDAUCu9YOedI6ER05BEBwY5bZ/qolQAmJA4pccEJR1FDI8dWPEdDkixRaYcvwLPDFua3u5j91O9fdGINzHY48fOEPF5gSUBKHcgSXgV3tbwT76ZRBPzAXv4JEiBCPyg5JIdZ1NLI9TB8QHZrxDT7k9YhJnAIYWgJKSBghe5QcmEJK7kCpQWAYgJkOyGAg/BeQ%2BIQUO/IBAHWSQsoVX0iHpDqhy8Wfo4WMq5DQKwvIIUSXaHCdIhi4UQfvHqGDC8%2BEARTikLd6lc8WGSQzK4CV7l8BhnHH5hAESBdou0GgSQDsSoAy9IhovWnvTwz5SdbmcQvAH5344SNAuJw8gaLyIH08phmfa4Q0Mh4QB/ODw49Fbw%2B5lcC%2BAwuLq32XRPD1OkbSOJF114NdweIkZdAX3a7qDOu5QlDlQFx4xBvoTzEgEIQfY49su2wqgLSjY6yCHe8grwViJd7hDAReHDEd4KV7z91ELidfkSyZbb96RWIpXo7jxHpAmeugt0lYiopkJaUaaWgBZGzglwGAR0cuHfXCA/8zeOcTEHW36F2I6wbAYgOTzrB5QJmr8boXxFqFHRxmXQmoeXwo4BNUhkQyODkRJAmiehGw9fsJGMqIDqAmI7EUyI0S0BaUdog0eXwhF2dxmlQjIfaNr6wjOEffRoa6IZH8EPRYgUhlUNNG19/RZXY0Qr0TGA8BeqgXEdv2tGpjgxvo2vjt0IDQig%2BNIjwYECL5E9S%2BeAtYfwQdEfDeRwAOZESLdHcjcRdbJ3ksD%2B61jahZYq4Ah1uLmw0haYnobiKDH6i5MOPbMQ73Dy2iRxBYwHhJV1EJiQxgPPscj3b4p8axr8OscfzQG38MBlFbHg2I37siZxm46sUZiwC7i1xaIuzhiP3GT9MBx4q4eu035gjOWAI8sWV3OFMd4gzffgtvwvHdidxtQ2fugOfHoBcBqPX8ZVi/HAC7OsEvtjiOIBH8MxfgoXvkNdGPiF%2BR4qCUDCH7xB1xSwwwDDFeioSmAvEKgMqHAkHjMBuvIlnIO2FIS2xqEzvuTz7FkD1O%2BzExKIWtEwClecA9foy1EKfNvikccibjyok0ScJh47FiSJ5Z8sBWQrICCKzFbYBJWRBGVkpPlaqTFWJIFVgK35FHRsY%2BCH2JgOZDjUbECgWlEyGoSWjyBwE1YaBPL7vDXxE3TiKX0jFOTtx14sCbJMgnIiIupYu8WVx9G8Q3JRnW5h5K8m3DC0eYicbUKCkkAQpHQioXqMyGRS2OMUl0YGIymrjyeyU1EWlPU7hTfcY4/KXxCylvNpxoLMqapUcgriqpwmCDg5PU6utXO1U25vb1BY%2BSrx6w2vu6GgkN8h%2BKbCQdX1zbcCdW/bKKdz34KzCeJxPfiTwMEl8CUenUlqb1ROiyZPQfQYjJ6FZDIBLotARQm3GOSLDyB3CRoMdJJCtxvAdQhsT1KS71TvR84rIVnSZ5XSjpYgW6WdIF61SkuX0m6XdPbgkisuLfbfqLwEn4SJho7EkLXmInkDoZKEmlodOOkZcGxTA9AM3BYEQAoZK0mGWjJ%2BkgzMAr0/MRFNvwVSmpFMvsc42Rk8igZ48HCpq3BDYFJAYhEDgPiICgs2ZYhC3MJWUgLQ%2BZ6QDnDKXGhCyo4j9LmBAH5nllVAVABWYrIVlM9eZ7ld1hbjBprl5ZSsxWSrIlmMh3WXgFNF7Ssp6z9o7M8qLxDFoqY5ZOs3WTaVVkXBfB7DbOnBRBrj54Jss2CiAi5hSMGSrs6WbLJNlXp6aTsm0s7OlnZ1IKds5WQ7P1kRT3WrskBIzA9kSCNZoDN2fCT9mmkA5DeY2TnBFm/EEiRjDMhHJtneza04g7pks1/a4sZZ6QfORrKLnaYLq4NEudI1wrHBXAOicKg5FkLyFZK9/WUHgh%2BEv4lCFBDLmLMci5A3Ba0S
pgoQI4MhtWEZdFnoT7o99tCZMFeTe14iptnac8uQgvKnlvEBoYhHgRGTPhny15zdS%2BcTC3nrzb5doXeZPRtL9yj5zdKfGIS8CqAIyn8qONfKlJ/zlI98j%2BY/K8DPzeqh5DMuiw4FK1daDJPooCCoBngYq8CwGjFDPlBR6iVUC2gyVTxfysFFgHBfvI6igVb8xMHBRYCwUkLuY5C5SJQuoVS1T5xMLqK3TQXtswFrCwquwpPkD4z5KCiwKXTjnmyv5AioRRmTfnZ1j5HIR2cvF/n6yK8e8pes7RNByILcBuIgIZDBLNId%2B3bRQo0AnnQkYgGYUjrPJrlSCi8z3GIC8GXgVJoSFeKxW8EDzKLVFQiE6eoo0SaLSSYJXRTr3HnUstEfxYxXLx14EdnGti7xVzGsW2KLS9eBxZEozhOKC8LiwcP4uXxQwPFFcEgHUWwC%2BLLKBigJUYpMUCBQl0itJTS29jiFoUf/G0kwCSV6YclGFepbUpsV8QrgXaH2YkrkUZlnGTAFxRyXKWBKPQmSzRY0p8WmLBlRSsxQyGqUB4MZXMWZaXHaWMwYgNpZxosq0QckoF6hQZQXI0XZLkUEygpekqmWmKCOuypgFUpqUZk6lDiqEvCWsWKKblrSycVUiaXdLeqvS/pTsuOU0toSIyg5U1FCRHLkAhioJcGyaZ/8rlcy/kqsp6W1yBIGy75SaF2UArDISePJfotBWFLwVISqRcTV%2BX/9oVFg55XcpEYPLmlzymJeSqiUvK1lCK72MitSWEr/lluUZTSsxWTLcVBHDZcSsnnKI4VnyhlUiq2WuloWWKigl0zmKrsEJZXbACB0NxCAEAxALsKCxt57ABZ%2BCmPnsAPbSLzY3wWgCwCyCDyTp8ou3O4k7jxxwwLAXOQLM0zlQ7BVsUjlIlXhnlzYXoVgMTAy71ENApAJ2ngsChiEPVNq8QnvWUJyyJ6vVMQNpBHlwFylllM6Sl2k6aKgeQEufnRMopvj4QqXAMrkuJQkyuWYauDAeUhCqImAwAcuIJAMCuQGAbEFUCSB5AW4GS53cPMTGrq90EieHMBuglDJ4AIVnIEmS8C8CEKu1HS/4g3QAqDrh17RUdYzG3Jj1M5jMYNYMwtJjVdoPkOsmdKHWEKrMu1ONDkvHULrLOEAEMDkmXVQtIFizeYluunU%2Brllh61oo6RvWEKlqY6%2BdY%2BornLr6VFitjloUAnnj01T4zNRv2zUJ9c1EAfNWdIvXXFt61gyIRY0n5zM3BXEz7hvX6HTC2JCIuYdrIvS0pu%2B0lC4PJSUp3ZaUU6WlEEAuBpArOF6AWWjgvQEapAVOJns43lXKhFVyq1VW3zQ3OEpM0LPhJpCvqRRE0mAJQmITJAVwngKAz9n7jrLEZpxFMCAIaC9qpJemjwdVm2xt4IAvACs0GM4PU5FrSMga7VcoUxYyEdV9yoCEEIWq/KY%2BWmnTUDyzr0kxVBK7Fcvls3abNCqa5VU5poUSrqW7m%2BzTSwQBChDizmvBVfX67RrxC%2B5Xqi2qUhdr4SwGdBAlrnU9qrYeAUgJFvv7WBrA/arkF6A83gCs626mdfevfVl0uYWWvANU0siFavNxAErfUSq0LquYs6w9evLCh2bPNQW2gI1vZRzdo1LWz8Hh0Qox43135JRaaQK2Bbitt6iwK%2BrnUTaP18JZrRIq63gCvQDWubc1uW3CkD1TdBktNu61ehet22gbdlt23/EByNpYTaJts3EBptL42aUdvAEQ9l0BmhUgFOA1EtQN4/cDcShe31boNKTKhHdssjVc7NT27zutqB5va4kGq2lNCi%2B1L8QNNwHNeSwB0w6etwOs8ptse1sNk5K7czWtoh3abjxzbACqes9A6rEZ6nbQKgBWAzS8dkO4RsohO347v1a9aHXVtxn07GdiO5HVGx%2B1o6wNGOqpIDt6FOEcdncn4Mj2bh714ZemuzkWry2hsYdxAeHaCrw2XsDCHALYLQE4BdpeAfgDgFoFICoBOAo%2BSwNYE9A7A9gdQyEDwFIAEBNAeurYJxBACSBEgLwSQCkD3QpANARwAcgFUSA9ADdHASQMbtd3m7OAvABQCAF9Uu7Tdeu0gHAFgBIAQ4oMcgJQEz0TAzgZPKJHIQYCcQ%2BASjeIPHogAxBo9IIZgMQCZCcAndQkenj%2BAYDXRo9WAcuEYHEDJ7SA%2BAV9o4F4nR78YjQT9g3t4Dz9w9Zu%2B/jEA0R16PAWAaPZJpYBj6tgmhctQoC7BmJeIP4VkCbqd38BBAIgMQOwEY0H75ASgNQNHt0DCVq1xgHLZYH0AoD49kALYBbgGDx6OAVsH8Nkx167Vy4ewd4GCTJjy94gtoG2AQAQZglZQ1pa3aMSYBx69pTQZwHjKkhzA/AwlUICsCqA1A9AJQHIAIDQO4Gsg%2BBhgKMGwMTBhKDQJA4MCWCEHKDiBgYEMHaBkHxgCQSg7Qc8BdA9AtsZg1gdYMSAtgE8XYPsD0BPBR4K%2B/QIbqj096LdHAO4IkCthVh89CoCAJU2L3b4rdVgB/RcFwCEB2IxwAWR4GEihwcwV6XgEnq0DopSAr0M3hMDmakAPdXaMwC8CnQvpJARwJw5ICKEcUihkhiPdIbN2yG49Ce53a7q2Bp7EAIATGPY2z079jDoMSIOpk4CqAqwLABQM3C2ZWUUgLwKQJ%2BA/iRBsAGwXgK/00V4B0AegM/UfvECn7ZAigFQOoB73X7SAvEDROkAkPh6jdmW6PbIZ/A1x7GSonQvcEUOGhlDfIVQ0XtoafgjDIMf8QYaKOhHk9Vhj3dsSnQpAu0RQ9w1Ol91skqkRwaQOHsj1dGZDse2wCEYsNu6/DZgAI7wCCMLHLDWwBXtkGcCSAgAA%3D%3D%3D + */ + // https://github.com/xenia-canary/xenia-canary/blob/968f656d96b3ca9c14d6467423df77d3583f7d18/src/xenia/cpu/backend/x64/x64_sequences.cc + /* + todo: this is a naive version, we can do far more optimizations for + constant src2 + */ + bool consts2 = false; + + if (i.src1.is_constant) { + e.LoadConstantXmm(e.xmm0, i.src1.constant()); } else { - e.mov(e.GetNativeParam(1), i.src2); + e.vmovdqa(e.xmm0, i.src1); } - e.lea(e.GetNativeParam(0), e.StashXmm(0, i.src1)); - e.CallNativeSafe(reinterpret_cast(EmulateShrV128)); - e.vmovaps(i.dest, e.xmm0); + if (i.src2.is_constant) { + consts2 = true; + e.mov(e.r8d, i.src2.constant() & 
7); + e.mov(e.eax, 8 - (i.src2.constant() & 7)); + } else { + e.movzx(e.r8d, i.src2); + e.and_(e.r8d, 7); + } + + e.vpshufd(e.xmm1, e.xmm0, 27); + e.vpcmpeqd(e.xmm3, e.xmm3, e.xmm3); + e.vpshufb(e.xmm0, e.xmm0, e.GetXmmConstPtr(XMMVSRShlByteshuf)); + if (!consts2) { + e.mov(e.eax, 8); + } + e.vmovd(e.xmm2, e.r8d); + if (!consts2) { + e.sub(e.eax, e.r8d); + } + e.vpsrlw(e.xmm1, e.xmm1, e.xmm2); + e.vpsrlw(e.xmm2, e.xmm3, e.xmm2); + e.vpshufb(e.xmm2, e.xmm2, e.GetXmmConstPtr(XMMVSRMask)); + e.vpand(e.xmm1, e.xmm1, e.xmm2); + e.vmovd(e.xmm2, e.eax); + e.vpsllw(e.xmm0, e.xmm0, e.xmm2); + e.vpsllw(e.xmm2, e.xmm3, e.xmm2); + e.vpshufb(e.xmm2, e.xmm2, e.GetXmmConstPtr(XMMZero)); + e.vpand(e.xmm0, e.xmm0, e.xmm2); + e.vpor(e.xmm0, e.xmm0, e.xmm1); + e.vpshufd(i.dest, e.xmm0, 27); } static __m128i EmulateShrV128(void*, __m128i src1, uint8_t src2) { // Almost all instances are shamt = 1, but non-constant. @@ -3238,6 +3268,8 @@ struct SET_ROUNDING_MODE_I32 } }; EMITTER_OPCODE_TABLE(OPCODE_SET_ROUNDING_MODE, SET_ROUNDING_MODE_I32); + +static void MaybeYieldForwarder(void* ctx) { xe::threading::MaybeYield(); } // ============================================================================ // OPCODE_DELAY_EXECUTION // ============================================================================ @@ -3245,7 +3277,11 @@ struct DELAY_EXECUTION : Sequence> { static void Emit(X64Emitter& e, const EmitArgType& i) { // todo: what if they dont have smt? - e.pause(); + if (cvars::delay_via_maybeyield) { + e.CallNativeSafe((void*)MaybeYieldForwarder); + } else { + e.pause(); + } } }; EMITTER_OPCODE_TABLE(OPCODE_DELAY_EXECUTION, DELAY_EXECUTION); diff --git a/src/xenia/cpu/compiler/passes/finalization_pass.cc b/src/xenia/cpu/compiler/passes/finalization_pass.cc index 1b409430c..d9e2846eb 100644 --- a/src/xenia/cpu/compiler/passes/finalization_pass.cc +++ b/src/xenia/cpu/compiler/passes/finalization_pass.cc @@ -41,18 +41,6 @@ bool FinalizationPass::Run(HIRBuilder* builder) { block->ordinal = block_ordinal++; // Ensure all labels have names. - auto label = block->label_head; - while (label) { - if (!label->name) { - const size_t label_len = 6 + 4; - char* name = reinterpret_cast(arena->Alloc(label_len + 1, 1)); - assert_true(label->id <= 9999); - auto end = fmt::format_to_n(name, label_len, "_label{}", label->id); - name[end.size] = '\0'; - label->name = name; - } - label = label->next; - } // Remove unneeded jumps. 
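Stepping back to the x64_sequences.cc hunk above: the inline sequence that replaces the EmulateShrV128 call shifts the whole 128-bit value right by src2 & 7 bits, carrying bits across byte boundaries. A minimal scalar sketch of that semantic, ignoring the byte-order shuffles the emitter has to perform and using a hypothetical helper name, would be:

#include <cassert>
#include <cstdint>

// Shift a 16-byte value right by 0-7 bits, treating byte[0] as the most
// significant byte; bits shifted out of one byte carry into the next.
static void ShiftRight128(const uint8_t src[16], uint32_t shamt, uint8_t out[16]) {
  shamt &= 7;
  uint32_t carry = 0;
  for (int i = 0; i < 16; ++i) {
    uint32_t b = src[i];
    out[i] = static_cast<uint8_t>(shamt ? ((carry << (8 - shamt)) | (b >> shamt)) : b);
    carry = b & ((1u << shamt) - 1);  // bits that spill into the following byte
  }
}

int main() {
  const uint8_t in[16] = {0x80, 0x01};  // remaining bytes are zero
  uint8_t out[16];
  ShiftRight128(in, 1, out);
  assert(out[0] == 0x40);  // 0x80 >> 1
  assert(out[1] == 0x00);  // 0x01 >> 1
  assert(out[2] == 0x80);  // the low bit of in[1] carried across the byte boundary
  return 0;
}

As the comment in the hunk notes, nearly all call sites use a shift amount of 1 but pass it as a non-constant, which is why the variable-shift path is still worth inlining.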
auto tail = block->instr_tail; diff --git a/src/xenia/cpu/compiler/passes/simplification_pass.cc b/src/xenia/cpu/compiler/passes/simplification_pass.cc index a8c93c3b6..da6f0dfe1 100644 --- a/src/xenia/cpu/compiler/passes/simplification_pass.cc +++ b/src/xenia/cpu/compiler/passes/simplification_pass.cc @@ -23,52 +23,6 @@ using namespace xe::cpu::hir; using xe::cpu::hir::HIRBuilder; using xe::cpu::hir::Instr; using xe::cpu::hir::Value; -using vmask_portion_t = uint64_t; -template -struct Valuemask_t { - vmask_portion_t bits[Ndwords]; - - static Valuemask_t create_empty(vmask_portion_t fill = 0) { - Valuemask_t result; - for (uint32_t i = 0; i < Ndwords; ++i) { - result.bits[i] = fill; - } - return result; - } - template - Valuemask_t operate(TCallable&& oper) const { - Valuemask_t result = create_empty(); - - for (uint32_t i = 0; i < Ndwords; ++i) { - result.bits[i] = oper(bits[i]); - } - return result; - } - template - Valuemask_t operate(TCallable&& oper, Valuemask_t other) const { - Valuemask_t result = create_empty(); - - for (uint32_t i = 0; i < Ndwords; ++i) { - result.bits[i] = oper(bits[i], other.bits[i]); - } - return result; - } - Valuemask_t operator&(ValueMask other) const { - return operate([](vmask_portion_t x, vmask_portion_t y) { return x & y; }, - other); - } - Valuemask_t operator|(ValueMask other) const { - return operate([](vmask_portion_t x, vmask_portion_t y) { return x | y; }, - other); - } - Valuemask_t operator^(ValueMask other) const { - return operate([](vmask_portion_t x, vmask_portion_t y) { return x ^ y; }, - other); - } - Valuemask_t operator~() const { - return operate([](vmask_portion_t x) { return ~x; }, other); - } -}; SimplificationPass::SimplificationPass() : ConditionalGroupSubpass() {} @@ -76,17 +30,13 @@ SimplificationPass::~SimplificationPass() {} bool SimplificationPass::Run(HIRBuilder* builder, bool& result) { result = false; - bool iter_result = false; - do { - iter_result = false; - iter_result |= SimplifyBitArith(builder); - iter_result |= EliminateConversions(builder); - iter_result |= SimplifyAssignments(builder); - iter_result |= SimplifyBasicArith(builder); - iter_result |= SimplifyVectorOps(builder); - result |= iter_result; - } while (iter_result); + result |= SimplifyBitArith(builder); + result |= EliminateConversions(builder); + result |= SimplifyAssignments(builder); + result |= SimplifyBasicArith(builder); + result |= SimplifyVectorOps(builder); + return true; } // simplifications that apply to both or and xor @@ -735,7 +685,9 @@ bool SimplificationPass::CheckAdd(hir::Instr* i, hir::HIRBuilder* builder) { auto [added_constant_neg, added_var_neg] = i->BinaryValueArrangeAsConstAndVar(); - if (!added_constant_neg) return false; + if (!added_constant_neg) { + return false; + } if (added_constant_neg->AsUint64() & GetScalarSignbitMask(added_constant_neg->type)) { // adding a value that has its signbit set! 
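The CheckAdd branch above fires when the added constant has its sign bit set; in two's complement such an add is just a subtract of the constant's negation, which appears to be the rewrite this code is setting up. The identity itself is easy to sanity-check in isolation:

#include <cassert>
#include <cstdint>

int main() {
  // Adding a constant whose sign bit is set == subtracting its negation,
  // at any operand width.
  uint32_t x = 0x12345678u;
  assert(x + 0xFFFFFFFEu == x - 2u);               // 0xFFFFFFFE is -2 as uint32_t
  uint64_t y = 0x1122334455667788ull;
  assert(y + 0xFFFFFFFFFFFFFFF0ull == y - 16ull);  // ...FFF0 is -16 as uint64_t
  return 0;
}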
@@ -882,11 +834,6 @@ bool SimplificationPass::CheckScalarConstCmp(hir::Instr* i, } else if (cmpop == OPCODE_COMPARE_UGT) { // impossible, cannot be greater than mask - - /* i->Replace(&OPCODE_ASSIGN_info, 0); - i->set_src1(builder->LoadZeroInt8()); - return true; - */ constant_replacement = builder->LoadZeroInt8(); } else if (cmpop == OPCODE_COMPARE_ULE) { // less than or equal to mask = @@ -914,9 +861,9 @@ bool SimplificationPass::CheckIsTrueIsFalse(hir::Instr* i, bool istrue = i->opcode == &OPCODE_COMPARE_NE_info; bool isfalse = i->opcode == &OPCODE_COMPARE_EQ_info; - auto [input_cosntant, input] = i->BinaryValueArrangeAsConstAndVar(); + auto [input_constant, input] = i->BinaryValueArrangeAsConstAndVar(); - if (!input_cosntant || input_cosntant->AsUint64() != 0) { + if (!input_constant || input_constant->AsUint64() != 0) { return false; } @@ -957,12 +904,6 @@ bool SimplificationPass::CheckIsTrueIsFalse(hir::Instr* i, } } - /* Instr* input_def = input->def; - if (!input_def) { - return false; - } - - input_def = input_def->GetDestDefSkipAssigns();*/ return false; } bool SimplificationPass::CheckSHRByConst(hir::Instr* i, diff --git a/src/xenia/cpu/hir/hir_builder.cc b/src/xenia/cpu/hir/hir_builder.cc index 286a19845..7a5935001 100644 --- a/src/xenia/cpu/hir/hir_builder.cc +++ b/src/xenia/cpu/hir/hir_builder.cc @@ -1872,18 +1872,14 @@ Value* HIRBuilder::AndNot(Value* value1, Value* value2) { ASSERT_NON_FLOAT_TYPE(value1); ASSERT_NON_FLOAT_TYPE(value2); ASSERT_TYPES_EQUAL(value1, value2); - //only other type it can be used with is INT64_TYPE (andc) + // only other type it can be used with is INT64_TYPE (andc) if (value1->type != VEC128_TYPE) { return this->And(this->Not(value2), value1); + } else if (value1 == value2) { + return LoadZero(value1->type); + } else if (value1->IsConstantZero() || value2->IsConstantZero()) { + return value1; } else { - if (value1 == value2) { - return LoadZero(value1->type); - } else if (value1->IsConstantZero()) { - return value1; - } else if (value2->IsConstantZero()) { - return value1; - } - Instr* i = AppendInstr(OPCODE_AND_NOT_info, 0, AllocValue(value1->type)); i->set_src1(value1); i->set_src2(value2); diff --git a/src/xenia/cpu/hir/label.h b/src/xenia/cpu/hir/label.h index c57fd0154..cc4f4146e 100644 --- a/src/xenia/cpu/hir/label.h +++ b/src/xenia/cpu/hir/label.h @@ -26,6 +26,13 @@ class Label { char* name; void* tag; + // just use stringification of label id + // this will later be used as an input to xbyak. xbyak only accepts + // std::string as a value, not passed by reference, so precomputing the + // stringification does not help + std::string GetIdString() { + return std::to_string(id); + } }; } // namespace hir diff --git a/src/xenia/cpu/ppc/ppc_emit_memory.cc b/src/xenia/cpu/ppc/ppc_emit_memory.cc index 9bb7d5593..69c7fdf9e 100644 --- a/src/xenia/cpu/ppc/ppc_emit_memory.cc +++ b/src/xenia/cpu/ppc/ppc_emit_memory.cc @@ -16,7 +16,7 @@ #include "xenia/cpu/ppc/ppc_hir_builder.h" DEFINE_bool( - disable_prefetch_and_cachecontrol, false, + disable_prefetch_and_cachecontrol, true, "Disables translating ppc prefetch/cache flush instructions to host " "prefetch/cacheflush instructions. 
This may improve performance as these " "instructions were written with the Xbox 360's cache in mind, and modern " diff --git a/src/xenia/cpu/ppc/ppc_frontend.cc b/src/xenia/cpu/ppc/ppc_frontend.cc index 7b7617368..bd65919dd 100644 --- a/src/xenia/cpu/ppc/ppc_frontend.cc +++ b/src/xenia/cpu/ppc/ppc_frontend.cc @@ -105,6 +105,11 @@ bool PPCFrontend::Initialize() { } bool PPCFrontend::DeclareFunction(GuestFunction* function) { + + //chrispy: make sure we aren't declaring a function that is actually padding data, this will mess up PPCScanner and is hard to debug + //wow, this halo reach actually has branches into 0 opcodes, look into further + //xenia_assert(*reinterpret_cast( + // this->memory()->TranslateVirtual(function->address())) != 0); // Could scan or something here. // Could also check to see if it's a well-known function type and classify // for later. diff --git a/src/xenia/cpu/ppc/ppc_hir_builder.cc b/src/xenia/cpu/ppc/ppc_hir_builder.cc index 263d3675a..867651c32 100644 --- a/src/xenia/cpu/ppc/ppc_hir_builder.cc +++ b/src/xenia/cpu/ppc/ppc_hir_builder.cc @@ -34,6 +34,11 @@ DEFINE_bool( "unimplemented PowerPC instruction is encountered.", "CPU"); +DEFINE_bool( + emit_useless_fpscr_updates, false, + "Emit useless fpscr update instructions (pre-10/30/2022 behavior). ", + "CPU"); + namespace xe { namespace cpu { namespace ppc { @@ -89,6 +94,9 @@ bool PPCHIRBuilder::Emit(GuestFunction* function, uint32_t flags) { function_ = function; start_address_ = function_->address(); + //chrispy: i've seen this one happen, not sure why but i think from trying to precompile twice + //i've also seen ones with a start and end address that are the same... + assert_true(function_->address() <= function_->end_address()); instr_count_ = (function_->end_address() - function_->address()) / 4 + 1; with_debug_info_ = (flags & EMIT_DEBUG_COMMENTS) == EMIT_DEBUG_COMMENTS; @@ -242,6 +250,7 @@ void PPCHIRBuilder::MaybeBreakOnInstruction(uint32_t address) { } void PPCHIRBuilder::AnnotateLabel(uint32_t address, Label* label) { + //chrispy: label->name is unused, it would be nice to be able to remove the field and this code char name_buffer[13]; auto format_result = fmt::format_to_n(name_buffer, 12, "loc_{:08X}", address); name_buffer[format_result.size] = '\0'; @@ -447,31 +456,38 @@ void PPCHIRBuilder::StoreFPSCR(Value* value) { void PPCHIRBuilder::UpdateFPSCR(Value* result, bool update_cr1) { // TODO(benvanik): detect overflow and nan cases. // fx and vx are the most important. - Value* fx = LoadConstantInt8(0); - Value* fex = LoadConstantInt8(0); - Value* vx = LoadConstantInt8(0); - Value* ox = LoadConstantInt8(0); + /* + chrispy: stubbed this out because right now all it does is waste + memory and CPU time + */ + if (cvars::emit_useless_fpscr_updates) { + Value* fx = LoadConstantInt8(0); + Value* fex = LoadConstantInt8(0); + Value* vx = LoadConstantInt8(0); + Value* ox = LoadConstantInt8(0); - if (update_cr1) { - // Store into the CR1 field. - // We do this instead of just calling CopyFPSCRToCR1 so that we don't - // have to read back the bits and do shifting work. - StoreContext(offsetof(PPCContext, cr1.cr1_fx), fx); - StoreContext(offsetof(PPCContext, cr1.cr1_fex), fex); - StoreContext(offsetof(PPCContext, cr1.cr1_vx), vx); - StoreContext(offsetof(PPCContext, cr1.cr1_ox), ox); + if (update_cr1) { + // Store into the CR1 field. + // We do this instead of just calling CopyFPSCRToCR1 so that we don't + // have to read back the bits and do shifting work. 
+ StoreContext(offsetof(PPCContext, cr1.cr1_fx), fx); + StoreContext(offsetof(PPCContext, cr1.cr1_fex), fex); + StoreContext(offsetof(PPCContext, cr1.cr1_vx), vx); + StoreContext(offsetof(PPCContext, cr1.cr1_ox), ox); + } + + // Generate our new bits. + Value* new_bits = Shl(ZeroExtend(fx, INT32_TYPE), 31); + new_bits = Or(new_bits, Shl(ZeroExtend(fex, INT32_TYPE), 30)); + new_bits = Or(new_bits, Shl(ZeroExtend(vx, INT32_TYPE), 29)); + new_bits = Or(new_bits, Shl(ZeroExtend(ox, INT32_TYPE), 28)); + + // Mix into fpscr while preserving sticky bits (FX and OX). + Value* bits = LoadFPSCR(); + bits = Or(And(bits, LoadConstantUint32(0x9FFFFFFF)), new_bits); + StoreFPSCR(bits); } - // Generate our new bits. - Value* new_bits = Shl(ZeroExtend(fx, INT32_TYPE), 31); - new_bits = Or(new_bits, Shl(ZeroExtend(fex, INT32_TYPE), 30)); - new_bits = Or(new_bits, Shl(ZeroExtend(vx, INT32_TYPE), 29)); - new_bits = Or(new_bits, Shl(ZeroExtend(ox, INT32_TYPE), 28)); - - // Mix into fpscr while preserving sticky bits (FX and OX). - Value* bits = LoadFPSCR(); - bits = Or(And(bits, LoadConstantUint32(0x9FFFFFFF)), new_bits); - StoreFPSCR(bits); } void PPCHIRBuilder::CopyFPSCRToCR1() { diff --git a/src/xenia/cpu/ppc/ppc_instr.h b/src/xenia/cpu/ppc/ppc_instr.h index 7f2b69bba..a65f1b638 100644 --- a/src/xenia/cpu/ppc/ppc_instr.h +++ b/src/xenia/cpu/ppc/ppc_instr.h @@ -21,13 +21,7 @@ namespace xe { namespace cpu { namespace ppc { -// DEPRECATED -// TODO(benvanik): move code to PPCDecodeData. -struct InstrData { - PPCOpcode opcode; - const PPCOpcodeInfo* opcode_info; - uint32_t address; - +struct PPCOpcodeBits { union { uint32_t code; @@ -329,6 +323,14 @@ struct InstrData { }; }; +// DEPRECATED +// TODO(benvanik): move code to PPCDecodeData. +struct InstrData : public PPCOpcodeBits { + PPCOpcode opcode; + const PPCOpcodeInfo* opcode_info; + uint32_t address; +}; + } // namespace ppc } // namespace cpu } // namespace xe diff --git a/src/xenia/cpu/xex_module.cc b/src/xenia/cpu/xex_module.cc index d7325ea91..24ca9af73 100644 --- a/src/xenia/cpu/xex_module.cc +++ b/src/xenia/cpu/xex_module.cc @@ -31,7 +31,8 @@ #include "third_party/crypto/rijndael-alg-fst.c" #include "third_party/crypto/rijndael-alg-fst.h" #include "third_party/pe/pe_image.h" - +#include "xenia/cpu/ppc/ppc_decode_data.h" +#include "xenia/cpu/ppc/ppc_instr.h" DEFINE_bool(disable_instruction_infocache, false, "Disables caching records of called instructions/mmio accesses.", "CPU"); @@ -1074,12 +1075,13 @@ bool XexModule::LoadContinue() { image_sha_str_ += &fmtbuf[0]; } - info_cache_.Init(this); // Find __savegprlr_* and __restgprlr_* and the others. // We can flag these for special handling (inlining/etc). if (!FindSaveRest()) { return false; } + info_cache_.Init(this); + PrecompileDiscoveredFunctions(); // Load a specified module map and diff. 
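The UpdateFPSCR change above gates the old bit-packing behind cvars::emit_useless_fpscr_updates; when enabled it still packs fx/fex/vx/ox into FPSCR bits 31..28 and masks the previous value with 0x9FFFFFFF so the sticky FX and OX bits survive while FEX and VX are recomputed. A standalone restatement of that merge (hypothetical helper, illustrative values):

#include <cassert>
#include <cstdint>

static uint32_t MergeFpscr(uint32_t fpscr, bool fx, bool fex, bool vx, bool ox) {
  uint32_t new_bits = (uint32_t(fx) << 31) | (uint32_t(fex) << 30) |
                      (uint32_t(vx) << 29) | (uint32_t(ox) << 28);
  // 0x9FFFFFFF keeps bits 31 (FX) and 28 (OX), clears 30 (FEX) and 29 (VX).
  return (fpscr & 0x9FFFFFFFu) | new_bits;
}

int main() {
  // A previously set FX stays set even when the new fx flag is 0 (sticky bit).
  assert(MergeFpscr(0x80000000u, false, false, false, false) == 0x80000000u);
  // FEX and VX are summary bits: they are rebuilt each update, not accumulated.
  assert(MergeFpscr(0x60000000u, false, false, false, false) == 0u);
  return 0;
}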
if (cvars::load_module_map.size()) { @@ -1363,7 +1365,20 @@ InfoCacheFlags* XexModule::GetInstructionAddressFlags(uint32_t guest_addr) { return info_cache_.LookupFlags(guest_addr); } +void XexModule::PrecompileDiscoveredFunctions() { + auto others = PreanalyzeCode(); + for (auto&& other : others) { + if (other < low_address_ || other >= high_address_) { + continue; + } + auto sym = processor_->LookupFunction(other); + + if (!sym || sym->status() != Symbol::Status::kDefined) { + processor_->ResolveFunction(other); + } + } +} void XexModule::PrecompileKnownFunctions() { if (cvars::disable_function_precompilation) { return; @@ -1376,10 +1391,157 @@ void XexModule::PrecompileKnownFunctions() { } for (uint32_t i = 0; i < end; i++) { if (flags[i].was_resolved) { - processor_->ResolveFunction(low_address_ + (i * 4)); + uint32_t addr = low_address_ + (i * 4); + auto sym = processor_->LookupFunction(addr); + + if (!sym || sym->status() != Symbol::Status::kDefined) { + processor_->ResolveFunction(addr); + } } } } + +static uint32_t get_bl_called_function(XexModule* xexmod, uint32_t current_base, + ppc::PPCOpcodeBits wrd) { + int32_t displ = static_cast(ppc::XEEXTS26(wrd.I.LI << 2)); + + if (wrd.I.AA) { + return static_cast(displ); + } else { + return static_cast(static_cast(current_base) + displ); + } +} +static bool is_bl(unsigned w) { + return (w >> (32 - 6)) == 18 && ppc::PPCOpcodeBits{w}.I.LK; +} + +std::vector XexModule::PreanalyzeCode() { + uint32_t low_8_aligned = xe::align(low_address_, 8); + uint32_t high_8_aligned = high_address_ & ~(8U - 1); + + uint32_t n_possible_8byte_addresses = (high_8_aligned - low_8_aligned) / 8; + uint32_t* funcstart_candidate_stack = + new uint32_t[n_possible_8byte_addresses]; + uint32_t* funcstart_candstack2 = new uint32_t[n_possible_8byte_addresses]; + + uint32_t stack_pos = 0; + { + // all functions seem to start on 8 byte boundaries, except for obvious ones + // like the save/rest funcs + uint32_t* range_start = + (uint32_t*)memory()->TranslateVirtual(low_8_aligned); + uint32_t* range_end = (uint32_t*)memory()->TranslateVirtual( + high_8_aligned); // align down to multiple of 8 + + const uint8_t mfspr_r12_lr[4] = {0x7D, 0x88, 0x02, 0xA6}; + + // a blr instruction, with 4 zero bytes afterwards to pad the next address + // to 8 byte alignment + // if we see this prior to our address, we can assume we are a function + // start + const uint8_t blr[4] = {0x4E, 0x80, 0x0, 0x20}; + + uint32_t blr32 = *reinterpret_cast(&blr[0]); + + uint32_t mfspr_r12_lr32 = + *reinterpret_cast(&mfspr_r12_lr[0]); + /* + First pass: detect save of the link register at an eight byte + aligned address + */ + for (uint32_t* first_pass = range_start; first_pass < range_end; + first_pass += 2) { + if (*first_pass == mfspr_r12_lr32) { + // Push our newly discovered function start into our list + // All addresses in the list are sorted until the second pass + funcstart_candidate_stack[stack_pos++] = + static_cast(reinterpret_cast(first_pass) - + reinterpret_cast(range_start)) + + low_8_aligned; + } else if (first_pass[-1] == 0 && *first_pass != 0) { + // originally i checked for blr followed by 0, but some functions are + // actually aligned to greater boundaries. 
something that appears to be + // longjmp (it occurs in most games, so standard library, and loads ctx, + // so longjmp) is aligned to 16 bytes in most games + uint32_t* check_iter = &first_pass[-2]; + + while (!*check_iter) { + --check_iter; + } + + XE_LIKELY_IF(*check_iter == blr32) { + funcstart_candidate_stack[stack_pos++] = + static_cast(reinterpret_cast(first_pass) - + reinterpret_cast(range_start)) + + low_8_aligned; + } + } + } + uint32_t current_guestaddr = low_8_aligned; + // Second pass: detect branch with link instructions and decode the target + // address. We can safely assume that if bl is to address, that address is + // the start of the function + for (uint32_t* second_pass = range_start; second_pass < range_end; + second_pass++, current_guestaddr += 4) { + uint32_t current_call = xe::byte_swap(*second_pass); + + if (is_bl(current_call)) { + funcstart_candidate_stack[stack_pos++] = get_bl_called_function( + this, current_guestaddr, ppc::PPCOpcodeBits{current_call}); + } + } + + auto pdata = this->GetPESection(".pdata"); + + if (pdata) { + uint32_t* pdata_base = + (uint32_t*)this->memory()->TranslateVirtual(pdata->address); + + uint32_t n_pdata_entries = pdata->raw_size / 8; + + for (uint32_t i = 0; i < n_pdata_entries; ++i) { + uint32_t funcaddr = xe::load_and_swap(&pdata_base[i * 2]); + if (funcaddr >= low_address_ && funcaddr <= high_address_) { + funcstart_candidate_stack[stack_pos++] = funcaddr; + } else { + // we hit 0 for func addr, that means we're done + break; + } + } + } + } + + // Sort the list of function starts and then ensure that all addresses are + // unique + uint32_t n_known_funcaddrs = 0; + { + // make addresses unique + + std::sort(funcstart_candidate_stack, funcstart_candidate_stack + stack_pos); + + uint32_t read_pos = 0; + uint32_t write_pos = 0; + uint32_t previous_addr = ~0u; + while (read_pos < stack_pos) { + uint32_t current_addr = funcstart_candidate_stack[read_pos++]; + + if (current_addr != previous_addr) { + previous_addr = current_addr; + funcstart_candstack2[write_pos++] = current_addr; + } + } + n_known_funcaddrs = write_pos; + } + + delete[] funcstart_candidate_stack; + + std::vector result; + result.resize(n_known_funcaddrs); + memcpy(&result[0], funcstart_candstack2, + sizeof(uint32_t) * n_known_funcaddrs); + delete[] funcstart_candstack2; + return result; +} bool XexModule::FindSaveRest() { // Special stack save/restore functions. // http://research.microsoft.com/en-us/um/redmond/projects/invisible/src/crt/md/ppc/xxx.s.htm @@ -1552,6 +1714,8 @@ bool XexModule::FindSaveRest() { auto page_size = base_address_ <= 0x90000000 ? 64 * 1024 : 4 * 1024; auto sec_header = xex_security_info(); + std::vector resolve_on_exit{}; + resolve_on_exit.reserve(256); for (uint32_t i = 0, page = 0; i < sec_header->page_descriptor_count; i++) { // Byteswap the bitfield manually. xex2_page_descriptor desc; @@ -1586,13 +1750,20 @@ bool XexModule::FindSaveRest() { // Add function stubs. 
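The is_bl / get_bl_called_function helpers earlier in this file's diff drive the second PreanalyzeCode pass (the first pass looks for the mfspr r12, lr prologue and blr-padded boundaries): a word is a bl if its primary opcode (top six bits) is 18 and LK is set, and the target is the 24-bit LI field shifted left two, sign-extended, and taken relative to the branch unless AA is set. A self-contained restatement with a worked instruction word; the helper names here are illustrative, not the module's:

#include <cassert>
#include <cstdint>

static bool IsBl(uint32_t word) {
  return (word >> 26) == 18 && (word & 1) != 0;  // primary opcode 18, LK bit set
}

static uint32_t BlTarget(uint32_t address, uint32_t word) {
  int32_t displ = static_cast<int32_t>(word & 0x03FFFFFCu);  // LI << 2 occupies bits 2..25
  if (displ & 0x02000000) {
    displ -= 0x04000000;  // sign-extend the 26-bit displacement
  }
  return (word & 2) ? static_cast<uint32_t>(displ)             // AA: absolute target
                    : address + static_cast<uint32_t>(displ);  // otherwise relative to the bl
}

int main() {
  // 0x48000005 encodes "bl .+4": opcode 18, LI = 1 (displacement 4), AA = 0, LK = 1.
  assert(IsBl(0x48000005u));
  assert(BlTarget(0x82000000u, 0x48000005u) == 0x82000004u);
  assert(!IsBl(0x48000004u));  // same displacement without LK is a plain b, not bl
  return 0;
}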
char name[32]; + + auto AddXexFunction = [this, &resolve_on_exit](uint32_t address, + Function** function) { + DeclareFunction(address, function); + resolve_on_exit.push_back(address); + }; if (gplr_start) { uint32_t address = gplr_start; for (int n = 14; n <= 31; n++) { auto format_result = fmt::format_to_n(name, xe::countof(name), "__savegprlr_{}", n); Function* function; - DeclareFunction(address, &function); + + AddXexFunction(address, &function); function->set_end_address(address + (31 - n) * 4 + 2 * 4); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; @@ -1608,7 +1779,7 @@ bool XexModule::FindSaveRest() { auto format_result = fmt::format_to_n(name, xe::countof(name), "__restgprlr_{}", n); Function* function; - DeclareFunction(address, &function); + AddXexFunction(address, &function); function->set_end_address(address + (31 - n) * 4 + 3 * 4); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; @@ -1625,7 +1796,7 @@ bool XexModule::FindSaveRest() { auto format_result = fmt::format_to_n(name, xe::countof(name), "__savefpr_{}", n); Function* function; - DeclareFunction(address, &function); + AddXexFunction(address, &function); function->set_end_address(address + (31 - n) * 4 + 1 * 4); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; @@ -1641,7 +1812,7 @@ bool XexModule::FindSaveRest() { auto format_result = fmt::format_to_n(name, xe::countof(name), "__restfpr_{}", n); Function* function; - DeclareFunction(address, &function); + AddXexFunction(address, &function); function->set_end_address(address + (31 - n) * 4 + 1 * 4); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; @@ -1663,7 +1834,7 @@ bool XexModule::FindSaveRest() { auto format_result = fmt::format_to_n(name, xe::countof(name), "__savevmx_{}", n); Function* function; - DeclareFunction(address, &function); + AddXexFunction(address, &function); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; // TODO(benvanik): set flags fn->flags |= FunctionSymbol::kFlagSaveVmx; @@ -1677,7 +1848,7 @@ bool XexModule::FindSaveRest() { auto format_result = fmt::format_to_n(name, xe::countof(name), "__savevmx_{}", n); Function* function; - DeclareFunction(address, &function); + AddXexFunction(address, &function); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; // TODO(benvanik): set flags fn->flags |= FunctionSymbol::kFlagSaveVmx; @@ -1691,7 +1862,7 @@ bool XexModule::FindSaveRest() { auto format_result = fmt::format_to_n(name, xe::countof(name), "__restvmx_{}", n); Function* function; - DeclareFunction(address, &function); + AddXexFunction(address, &function); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; // TODO(benvanik): set flags fn->flags |= FunctionSymbol::kFlagRestVmx; @@ -1705,7 +1876,7 @@ bool XexModule::FindSaveRest() { auto format_result = fmt::format_to_n(name, xe::countof(name), "__restvmx_{}", n); Function* function; - DeclareFunction(address, &function); + AddXexFunction(address, &function); function->set_name(std::string_view(name, format_result.size)); // TODO(benvanik): set type fn->type = FunctionSymbol::User; 
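In the __savegprlr_14..31 hunk above, the end-address formula address + (31 - n) * 4 + 2 * 4 encodes the usual layout of these CRT helpers: assuming each numbered entry point is a single 4-byte store that falls through to the next one, every entry ends at the same shared two-instruction tail. A quick illustration of that arithmetic with a made-up base address:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t gplr_start = 0x82000400u;  // illustrative; the real value comes from the XEX headers
  uint32_t common_end = 0;
  for (int n = 14; n <= 31; ++n) {
    uint32_t entry = gplr_start + static_cast<uint32_t>(n - 14) * 4;   // one store per entry point
    uint32_t end = entry + static_cast<uint32_t>(31 - n) * 4 + 2 * 4;  // formula from the hunk above
    if (n == 14) {
      common_end = end;
    }
    assert(end == common_end);  // every entry falls through to one shared tail
  }
  return 0;
}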
// TODO(benvanik): set flags fn->flags |= FunctionSymbol::kFlagRestVmx; @@ -1716,6 +1887,13 @@ bool XexModule::FindSaveRest() { } } + for (auto&& to_ensure_precompiled : resolve_on_exit) { + // we want to make sure an address for these functions is available before + // any other functions are compiled for code generation purposes but we do + // it outside of our loops, because we also want to make sure we've marked + // up the symbol with info about it being save/rest and whatnot + processor_->ResolveFunction(to_ensure_precompiled); + } return true; } diff --git a/src/xenia/cpu/xex_module.h b/src/xenia/cpu/xex_module.h index 06045ff92..66691efff 100644 --- a/src/xenia/cpu/xex_module.h +++ b/src/xenia/cpu/xex_module.h @@ -34,7 +34,8 @@ struct InfoCacheFlags { uint32_t was_resolved : 1; // has this address ever been called/requested // via resolvefunction? uint32_t accessed_mmio : 1; - uint32_t reserved : 30; + uint32_t is_syscall_func : 1; + uint32_t reserved : 29; }; struct XexInfoCache { struct InfoCacheFlagsHeader { @@ -209,7 +210,8 @@ class XexModule : public xe::cpu::Module { InfoCacheFlags* GetInstructionAddressFlags(uint32_t guest_addr); void PrecompileKnownFunctions(); - + void PrecompileDiscoveredFunctions(); + std::vector PreanalyzeCode(); protected: std::unique_ptr CreateFunction(uint32_t address) override; diff --git a/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc b/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc index 8d0283744..34cc2a685 100644 --- a/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc +++ b/src/xenia/kernel/xboxkrnl/xboxkrnl_threading.cc @@ -9,7 +9,6 @@ #include #include - #include "xenia/base/atomic.h" #include "xenia/base/clock.h" #include "xenia/base/logging.h" @@ -964,7 +963,7 @@ uint32_t xeKeKfAcquireSpinLock(uint32_t* lock, uint64_t r13 = 1) { PrefetchForCAS(lock); assert_true(*lock != static_cast(r13)); // Lock. - while (!xe::atomic_cas(0, static_cast(r13), lock)) { + while (!xe::atomic_cas(0, xe::byte_swap(static_cast(r13)), lock)) { // Spin! // TODO(benvanik): error on deadlock? xe::threading::MaybeYield(); @@ -978,7 +977,7 @@ uint32_t xeKeKfAcquireSpinLock(uint32_t* lock, uint64_t r13 = 1) { } dword_result_t KfAcquireSpinLock_entry(lpdword_t lock_ptr, - ppc_context_t& ppc_context) { + const ppc_context_t& ppc_context) { auto lock = reinterpret_cast(lock_ptr.host_address()); return xeKeKfAcquireSpinLock(lock, ppc_context->r[13]); } @@ -997,7 +996,7 @@ void xeKeKfReleaseSpinLock(uint32_t* lock, dword_t old_irql) { } void KfReleaseSpinLock_entry(lpdword_t lock_ptr, dword_t old_irql, - ppc_context_t& ppc_ctx) { + const ppc_context_t& ppc_ctx) { auto lock = reinterpret_cast(lock_ptr.host_address()); assert_true(*lock_ptr == static_cast(ppc_ctx->r[13])); @@ -1014,14 +1013,14 @@ DECLARE_XBOXKRNL_EXPORT2(KfReleaseSpinLock, kThreading, kImplemented, kHighFrequency); // todo: this is not accurate void KeAcquireSpinLockAtRaisedIrql_entry(lpdword_t lock_ptr, - ppc_context_t& ppc_ctx) { + const ppc_context_t& ppc_ctx) { // Lock. auto lock = reinterpret_cast(lock_ptr.host_address()); // must not be our own thread assert_true(*lock_ptr != static_cast(ppc_ctx->r[13])); PrefetchForCAS(lock); - while (!xe::atomic_cas(0, static_cast(ppc_ctx->r[13]), lock)) { + while (!xe::atomic_cas(0, xe::byte_swap(static_cast(ppc_ctx->r[13])), lock)) { #if XE_ARCH_AMD64 == 1 // todo: this is just a nop if they don't have SMT, which is not great // either... 
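The xe::byte_swap added to the spin-lock CAS calls above accounts for the lock word living in guest memory: guest code reads it as a big-endian 32-bit value, while xe::atomic_cas compares raw host-endian words, so the owner value taken from r13 has to be stored byte-swapped for the guest-side ownership checks to read back the right number. A small illustration, with byte_swap written out locally as a stand-in for xe::byte_swap:

#include <cassert>
#include <cstdint>

static uint32_t byte_swap(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) | (v << 24);
}

int main() {
  uint32_t guest_r13 = 0x0001000Cu;  // example owner value held in r13
  // What the host-side CAS must store so a guest big-endian load yields r13 again:
  uint32_t lock_storage = byte_swap(guest_r13);
  assert(byte_swap(lock_storage) == guest_r13);
  // Storing the raw host value instead would make the guest read back a
  // different number and fail its "do I own this lock?" comparison.
  assert(byte_swap(guest_r13) != guest_r13);
  return 0;
}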
@@ -1036,12 +1035,12 @@ DECLARE_XBOXKRNL_EXPORT3(KeAcquireSpinLockAtRaisedIrql, kThreading, kImplemented, kBlocking, kHighFrequency); dword_result_t KeTryToAcquireSpinLockAtRaisedIrql_entry( - lpdword_t lock_ptr, ppc_context_t& ppc_ctx) { + lpdword_t lock_ptr, const ppc_context_t& ppc_ctx) { // Lock. auto lock = reinterpret_cast(lock_ptr.host_address()); assert_true(*lock_ptr != static_cast(ppc_ctx->r[13])); PrefetchForCAS(lock); - if (!xe::atomic_cas(0, static_cast(ppc_ctx->r[13]), lock)) { + if (!xe::atomic_cas(0, xe::byte_swap(static_cast(ppc_ctx->r[13])), lock)) { return 0; } return 1; @@ -1050,7 +1049,7 @@ DECLARE_XBOXKRNL_EXPORT4(KeTryToAcquireSpinLockAtRaisedIrql, kThreading, kImplemented, kBlocking, kHighFrequency, kSketchy); void KeReleaseSpinLockFromRaisedIrql_entry(lpdword_t lock_ptr, - ppc_context_t& ppc_ctx) { + const ppc_context_t& ppc_ctx) { // Unlock. assert_true(*lock_ptr == static_cast(ppc_ctx->r[13])); auto lock = reinterpret_cast(lock_ptr.host_address()); @@ -1283,7 +1282,8 @@ void ExInitializeReadWriteLock_entry(pointer_t lock_ptr) { } DECLARE_XBOXKRNL_EXPORT1(ExInitializeReadWriteLock, kThreading, kImplemented); -void ExAcquireReadWriteLockExclusive_entry(pointer_t lock_ptr, ppc_context_t& ppc_context) { +void ExAcquireReadWriteLockExclusive_entry(pointer_t lock_ptr, + const ppc_context_t& ppc_context) { auto old_irql = xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); int32_t lock_count = ++lock_ptr->lock_count; @@ -1301,7 +1301,7 @@ DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockExclusive, kThreading, kImplemented, kBlocking); dword_result_t ExTryToAcquireReadWriteLockExclusive_entry( - pointer_t lock_ptr, ppc_context_t& ppc_context) { + pointer_t lock_ptr, const ppc_context_t& ppc_context) { auto old_irql = xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); @@ -1320,7 +1320,7 @@ DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockExclusive, kThreading, kImplemented); void ExAcquireReadWriteLockShared_entry(pointer_t lock_ptr, - ppc_context_t& ppc_context) { + const ppc_context_t& ppc_context) { auto old_irql = xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); int32_t lock_count = ++lock_ptr->lock_count; @@ -1340,7 +1340,7 @@ DECLARE_XBOXKRNL_EXPORT2(ExAcquireReadWriteLockShared, kThreading, kImplemented, kBlocking); dword_result_t ExTryToAcquireReadWriteLockShared_entry( - pointer_t lock_ptr, ppc_context_t& ppc_context) { + pointer_t lock_ptr, const ppc_context_t& ppc_context) { auto old_irql = xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); @@ -1361,7 +1361,7 @@ DECLARE_XBOXKRNL_EXPORT1(ExTryToAcquireReadWriteLockShared, kThreading, kImplemented); void ExReleaseReadWriteLock_entry(pointer_t lock_ptr, - ppc_context_t& ppc_context) { + const ppc_context_t& ppc_context) { auto old_irql = xeKeKfAcquireSpinLock(&lock_ptr->spin_lock, ppc_context->r[13]); diff --git a/src/xenia/vfs/devices/null_device.cc b/src/xenia/vfs/devices/null_device.cc index ef34fd833..79490376b 100644 --- a/src/xenia/vfs/devices/null_device.cc +++ b/src/xenia/vfs/devices/null_device.cc @@ -21,7 +21,7 @@ namespace vfs { NullDevice::NullDevice(const std::string& mount_path, const std::initializer_list& null_paths) - : Device(mount_path), null_paths_(null_paths), name_("NullDevice") {} + : Device(mount_path), name_("NullDevice"), null_paths_(null_paths) {} NullDevice::~NullDevice() = default; diff --git a/third_party/FFmpeg b/third_party/FFmpeg index a437fe6d8..fa4f77cf4 160000 --- a/third_party/FFmpeg +++ b/third_party/FFmpeg @@ -1 +1 @@ 
-Subproject commit a437fe6d8efef17c8ad33d39f5815032e7adf5d7 +Subproject commit fa4f77cf444cd30894a222148efc5a371b3f76a6
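The NullDevice constructor change above only reorders the member-initializer list to match the declaration order of name_ and null_paths_; members are always constructed in declaration order no matter how the list is written, so a mismatched list draws a -Wreorder/-Wreorder-ctor warning even when the behavior is unchanged. A minimal reproduction of the pattern being silenced (the names here are stand-ins, not the VFS classes):

struct DeviceLike {
  explicit DeviceLike(int mount) : mount_(mount) {}
  int mount_;
};

struct NullDeviceLike : DeviceLike {
  // Initialization actually runs DeviceLike, then name_, then null_paths_
  // (declaration order); writing the list in another order is what the
  // compiler warns about, even though the result here is identical.
  NullDeviceLike(int mount, int paths)
      : DeviceLike(mount), null_paths_(paths), name_(7) {}
  int name_;
  int null_paths_;
};

int main() {
  NullDeviceLike d(1, 2);
  return (d.name_ == 7 && d.null_paths_ == 2) ? 0 : 1;
}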