SPU/vm: Improve vm::range_lock a bit

Use some prefetching
Use optimistic locking
This commit is contained in:
Nekotekina 2020-11-08 17:21:19 +03:00
parent 3507cd0a37
commit 8bc9868c1f
3 changed files with 29 additions and 21 deletions

View file

@@ -167,6 +167,8 @@ namespace vm
for (u64 i = 0;; i++)
{
range_lock->store(begin | (u64{size} << 32));
const u64 lock_val = g_range_lock.load();
const u64 is_share = g_shmem[begin >> 16].load();
@@ -188,18 +190,18 @@ namespace vm
if (addr + size <= lock_addr || addr >= lock_addr + lock_size) [[likely]]
{
range_lock->store(begin | (u64{size} << 32));
const u64 new_lock_val = g_range_lock.load();
if (!new_lock_val || new_lock_val == lock_val) [[likely]]
{
break;
}
range_lock->release(0);
}
// Wait a bit before accessing g_mutex
range_lock->store(0);
busy_wait(200);
std::shared_lock lock(g_mutex, std::try_to_lock);
if (!lock && i < 15)