Rewrite cpu_thread::suspend_all

It is now a higher-order function.
Make only one thread do the hard work of thread pausing.
This commit is contained in:
Nekotekina 2020-10-09 20:33:12 +03:00
parent 6d83c9cc0e
commit 050c3e1d6b
10 changed files with 299 additions and 415 deletions

View file

@ -201,7 +201,7 @@ asmjit::JitRuntime& asmjit::get_global_runtime()
return g_rt;
}
void asmjit::build_transaction_enter(asmjit::X86Assembler& c, asmjit::Label fallback, const asmjit::X86Gp& ctr, uint less_than)
asmjit::Label asmjit::build_transaction_enter(asmjit::X86Assembler& c, asmjit::Label fallback, const asmjit::X86Gp& ctr, uint less_than)
{
Label fall = c.newLabel();
Label begin = c.newLabel();
@ -234,7 +234,10 @@ void asmjit::build_transaction_enter(asmjit::X86Assembler& c, asmjit::Label fall
c.jae(fallback);
c.align(kAlignCode, 16);
c.bind(begin);
c.xbegin(fall);
return fall;
// xbegin should be issued manually; this allows adding more checks before entering the transaction
//c.xbegin(fall);
}
void asmjit::build_transaction_abort(asmjit::X86Assembler& c, unsigned char code)

View file

@ -56,7 +56,7 @@ namespace asmjit
asmjit::JitRuntime& get_global_runtime();
// Emit xbegin and adjacent loop, return label at xbegin
void build_transaction_enter(X86Assembler& c, Label fallback, const X86Gp& ctr, uint less_than);
[[nodiscard]] asmjit::Label build_transaction_enter(X86Assembler& c, Label fallback, const X86Gp& ctr, uint less_than);
// Emit xabort
void build_transaction_abort(X86Assembler& c, unsigned char code);

View file

@ -39,84 +39,6 @@ void shared_mutex::imp_unlock_shared(u32 old)
}
}
// Slow path of lock_low(): spin briefly, then acquire the writer slot
// and downgrade it to a single "low" reader (+1).
// val: the m_value snapshot observed by the failed fast path.
void shared_mutex::imp_lock_low(u32 val)
{
verify("shared_mutex underflow" HERE), val < c_err;
// Optimistic short spin before taking the heavyweight path
for (int i = 0; i < 10; i++)
{
busy_wait();
if (try_lock_low())
{
return;
}
}
// Acquire writer lock and downgrade
const u32 old = m_value.fetch_add(c_one);
if (old == 0)
{
// Uncontended: writer slot obtained immediately, convert it to one reader
lock_downgrade();
return;
}
verify("shared_mutex overflow" HERE), (old % c_sig) + c_one < c_sig;
// Wait until the writer slot is signalled, then convert it to one reader
imp_wait();
lock_downgrade();
}
// Slow path of unlock_low(): taken when a writer bit was set.
// old: the m_value observed before the decrement.
void shared_mutex::imp_unlock_low(u32 old)
{
verify("shared_mutex underflow" HERE), old - 1 < c_err;
// Check reader count, notify the writer if necessary
// (low readers occupy the bits below c_vip; zero means this was the last one)
if ((old - 1) % c_vip == 0)
{
imp_signal();
}
}
// Slow path of lock_vip(): spin briefly, then acquire the writer slot
// and downgrade it to a single "vip" reader (+c_vip).
// val: the m_value snapshot observed by the failed fast path.
void shared_mutex::imp_lock_vip(u32 val)
{
verify("shared_mutex underflow" HERE), val < c_err;
// Optimistic short spin before taking the heavyweight path
for (int i = 0; i < 10; i++)
{
busy_wait();
if (try_lock_vip())
{
return;
}
}
// Acquire writer lock and downgrade
const u32 old = m_value.fetch_add(c_one);
if (old == 0)
{
// Uncontended: writer slot obtained immediately, convert it to one vip reader
lock_downgrade_to_vip();
return;
}
verify("shared_mutex overflow" HERE), (old % c_sig) + c_one < c_sig;
// Wait until the writer slot is signalled, then convert it to one vip reader
imp_wait();
lock_downgrade_to_vip();
}
// Slow path of unlock_vip(): taken when a writer bit was set.
// old: the m_value observed before the decrement.
void shared_mutex::imp_unlock_vip(u32 old)
{
verify("shared_mutex underflow" HERE), old - 1 < c_err;
// Check reader count, notify the writer if necessary
// (vip readers occupy the bits [c_vip, c_one); zero means this was the last one)
if ((old - 1) % c_one / c_vip == 0)
{
imp_signal();
}
}
void shared_mutex::imp_wait()
{
while (true)
@ -241,18 +163,3 @@ void shared_mutex::imp_lock_unlock()
imp_wait();
unlock();
}
// If the caller holds the only vip lock, atomically convert it into a low
// lock (-c_vip + 1) and return true; otherwise just release the vip lock
// (-c_vip) and return false.
bool shared_mutex::downgrade_unique_vip_lock_to_low_or_unlock()
{
return m_value.atomic_op([](u32& value)
{
// vip count lives in bits [c_vip, c_one); exactly 1 means we are unique
if (value % c_one / c_vip == 1)
{
// Replace the vip increment with a low increment
value -= c_vip - 1;
return true;
}
// Not unique: plain vip unlock
value -= c_vip;
return false;
});
}

View file

@ -12,17 +12,12 @@ class shared_mutex final
c_one = 1u << 14, // Fixed-point 1.0 value (one writer, max_readers = c_one - 1)
c_sig = 1u << 30,
c_err = 1u << 31,
c_vip = 1u << 7,
};
atomic_t<u32> m_value{};
void imp_lock_shared(u32 val);
void imp_unlock_shared(u32 old);
void imp_lock_low(u32 val);
void imp_unlock_low(u32 old);
void imp_lock_vip(u32 val);
void imp_unlock_vip(u32 old);
void imp_wait();
void imp_signal();
void imp_lock(u32 val);
@ -88,64 +83,6 @@ public:
}
}
// Try to acquire a "low" reader slot (+1) without blocking.
// Succeeds only when the whole value is below c_vip - 1, i.e. no writers,
// no vip readers, and room for one more low reader.
bool try_lock_low()
{
const u32 value = m_value.load();
// Conditional increment
return value < c_vip - 1 && m_value.compare_and_swap_test(value, value + 1);
}
// Acquire a "low" reader lock: single-CAS fast path, imp_lock_low() on contention.
void lock_low()
{
const u32 value = m_value.load();
if (value >= c_vip - 1 || !m_value.compare_and_swap_test(value, value + 1)) [[unlikely]]
{
imp_lock_low(value);
}
}
// Release a "low" reader lock (-1); signals a waiting writer via the slow path.
void unlock_low()
{
// Unconditional decrement (can result in broken state)
const u32 value = m_value.fetch_sub(1);
// A writer bit present means someone may be waiting on us
if (value >= c_one) [[unlikely]]
{
imp_unlock_low(value);
}
}
// Try to acquire a "vip" reader slot (+c_vip) without blocking.
// Requires no low readers held (value % c_vip == 0), and either the value is
// below the writer threshold or vip readers are already counted
// (presumably to allow joining under a downgrading writer — verify against callers).
bool try_lock_vip()
{
const u32 value = m_value.load();
// Conditional increment
return (value < c_one - 1 || value & (c_one - c_vip)) && (value % c_vip) == 0 && m_value.compare_and_swap_test(value, value + c_vip);
}
// Acquire a "vip" reader lock: single-CAS fast path, imp_lock_vip() on contention
// (the condition is the exact negation of try_lock_vip()'s success test).
void lock_vip()
{
const u32 value = m_value.load();
if ((value >= c_one - 1 && !(value & (c_one - c_vip))) || (value % c_vip) || !m_value.compare_and_swap_test(value, value + c_vip)) [[unlikely]]
{
imp_lock_vip(value);
}
}
// Release a "vip" reader lock (-c_vip); signals a waiting writer via the slow path.
void unlock_vip()
{
// Unconditional decrement (can result in broken state)
const u32 value = m_value.fetch_sub(c_vip);
// A writer bit present means someone may be waiting on us
if (value >= c_one) [[unlikely]]
{
imp_unlock_vip(value);
}
}
bool try_lock()
{
return m_value.compare_and_swap_test(0, c_one);
@ -214,12 +151,6 @@ public:
m_value -= c_one - 1;
}
// Convert a held writer lock (c_one) into a single vip reader (c_vip).
void lock_downgrade_to_vip()
{
// Convert to vip lock (can result in broken state)
m_value -= c_one - c_vip;
}
// Optimized wait for lockability without locking, relaxed
void lock_unlock()
{
@ -240,12 +171,9 @@ public:
{
return m_value.load() < c_one - 1;
}
// Special purpose logic
bool downgrade_unique_vip_lock_to_low_or_unlock();
};
// Simplified shared (reader) lock implementation. Mutually incompatible with low_lock and vip_lock.
// Simplified shared (reader) lock implementation.
class reader_lock final
{
shared_mutex& m_mutex;
@ -283,47 +211,3 @@ public:
m_upgraded ? m_mutex.unlock() : m_mutex.unlock_shared();
}
};
// RAII scope guard for shared_mutex::lock_low()/unlock_low().
// Special shared (reader) lock, mutually exclusive with vip locks.
// Mutually incompatible with the normal shared (reader) lock.
class low_lock final
{
shared_mutex& m_mutex;
public:
// Non-copyable: owns the lock for exactly one scope
low_lock(const low_lock&) = delete;
low_lock& operator=(const low_lock&) = delete;
// Acquires the low lock on construction
explicit low_lock(shared_mutex& mutex)
: m_mutex(mutex)
{
m_mutex.lock_low();
}
// Releases the low lock on scope exit
~low_lock()
{
m_mutex.unlock_low();
}
};
// RAII scope guard for shared_mutex::lock_vip()/unlock_vip().
// Special shared (reader) lock, mutually exclusive with low locks.
// Mutually incompatible with the normal shared (reader) lock.
class vip_lock final
{
shared_mutex& m_mutex;
public:
// Non-copyable: owns the lock for exactly one scope
vip_lock(const vip_lock&) = delete;
vip_lock& operator=(const vip_lock&) = delete;
// Acquires the vip lock on construction
explicit vip_lock(shared_mutex& mutex)
: m_mutex(mutex)
{
m_mutex.lock_vip();
}
// Releases the vip lock on scope exit
~vip_lock()
{
m_mutex.unlock_vip();
}
};