#pragma once

#include "vm.h"

class cpu_thread;
class shared_mutex;

namespace vm
{
	enum range_lock_flags : u64
	{
		/* flags (3 bits) */

		range_readable = 1ull << 32,
		range_writable = 2ull << 32,
		range_executable = 4ull << 32,
		range_all_mask = 7ull << 32,

		/* flag combinations with special meaning */

		range_normal = 3ull << 32, // R+W
		range_updated = 2ull << 32, // R+W as well but do not
		range_allocated = 4ull << 32, // No safe access
		range_deallocated = 0, // No safe access
	};

	extern shared_mutex g_mutex;

	extern thread_local atomic_t<cpu_thread*>* g_tls_locked;
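
	// Layout as decoded by range_lock() below (an inference from that code, not a
	// documented contract): bits 0-31 hold the locked address, bits 32-34 hold
	// range_lock_flags, and bits 35 and up hold the locked size.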
	extern atomic_t<u64> g_range_lock;
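
	// Per-64KB-block shareability flags (an inference from range_lock() below,
	// which indexes this array by addr >> 16 and, for shareable blocks, locks by
	// the offset within the block only).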
	extern atomic_t<u8> g_shareable[];

	// Register reader
	void passive_lock(cpu_thread& cpu);

	// Allocate a range lock for later use
	atomic_t<u64, 64>* alloc_range_lock();

	// Slow path of range_lock() below
	void range_lock_internal(atomic_t<u64, 64>* range_lock, u32 begin, u32 size);

	// Lock memory range
	FORCE_INLINE void range_lock(atomic_t<u64>& res, atomic_t<u64, 64>* range_lock, u32 begin, u32 size)
	{
		const u64 lock_val = g_range_lock.load();
		const u64 lock_addr = static_cast<u32>(lock_val); // low 32 bits, widened back to u64
		const u32 lock_size = static_cast<u32>(lock_val >> 35);

		u64 addr = begin;

		// Shareable blocks are compared by their offset within the 64KB block
		if (g_shareable[begin >> 16])
		{
			addr = addr & 0xffff;
		}

		if ((addr + size <= lock_addr || addr >= lock_addr + lock_size) && !(res.load() & 127)) [[likely]]
		{
			// Optimistic locking: publish the range, then re-check for a race
			range_lock->release(begin | (u64{size} << 32));

			const u64 new_lock_val = g_range_lock.load();

			if ((!new_lock_val || new_lock_val == lock_val) && !(res.load() & 127)) [[likely]]
			{
				return;
			}

			range_lock->release(0);
		}

		// Fallback to slow path
		range_lock_internal(range_lock, begin, size);
	}
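
	// A minimal usage sketch in comment form (an inference from the declarations
	// above, not an authoritative recipe; 'res', 'addr' and 'size' stand for the
	// caller's reservation value and target range):
	//
	//   atomic_t<u64, 64>* my_lock = alloc_range_lock(); // grab a dedicated slot
	//   range_lock(res, my_lock, addr, size);            // lock [addr, addr + size)
	//   // ... perform the guarded memory access ...
	//   my_lock->release(0);                             // clear the published range
	//   free_range_lock(my_lock);                        // return the slot when done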

	// Wait for all range locks within the specified range to be released
	void clear_range_locks(u32 addr, u32 size);

	// Release a range lock previously returned by alloc_range_lock()
	void free_range_lock(atomic_t<u64, 64>*) noexcept;

	// Unregister reader
	void passive_unlock(cpu_thread& cpu);

	// Unregister reader (foreign thread)
	void cleanup_unlock(cpu_thread& cpu) noexcept;

	// Optimization (set cpu_flag::memory)
	void temporary_unlock(cpu_thread& cpu) noexcept;
	void temporary_unlock() noexcept;

	class reader_lock final
	{
		bool m_upgraded = false;

	public:
		reader_lock(const reader_lock&) = delete;
		reader_lock& operator=(const reader_lock&) = delete;
		reader_lock();
		~reader_lock();

		void upgrade();
	};

	struct writer_lock final
	{
		writer_lock(const writer_lock&) = delete;
		writer_lock& operator=(const writer_lock&) = delete;
		writer_lock(u32 addr = 0);
		~writer_lock();
	};
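
	// A minimal usage sketch in comment form (assumes, without this header saying
	// so explicitly, that both guards lock g_mutex in shared/exclusive mode):
	//
	//   {
	//       vm::reader_lock rlock; // shared lock for the scope
	//       rlock.upgrade();       // promote to exclusive if needed
	//   }
	//   {
	//       vm::writer_lock wlock(addr); // exclusive lock, address optional
	//   }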
} // namespace vm