diff --git a/src/xenia/base/delay_scheduler.cc b/src/xenia/base/delay_scheduler.cc
new file mode 100644
index 000000000..982d1d05a
--- /dev/null
+++ b/src/xenia/base/delay_scheduler.cc
@@ -0,0 +1,111 @@
+/**
+ ******************************************************************************
+ * Xenia : Xbox 360 Emulator Research Project                                 *
+ ******************************************************************************
+ * Copyright 2022 Ben Vanik. All rights reserved.                             *
+ * Released under the BSD license - see LICENSE in the root for more details. *
+ ******************************************************************************
+ */
+
+#include "xenia/base/delay_scheduler.h"
+#include "xenia/base/assert.h"
+#include "xenia/base/logging.h"
+
+namespace xe::internal {
+
+DelaySchedulerBase::~DelaySchedulerBase() {
+  if (call_on_destruct_) {
+    for (auto& slot : deletion_queue_) {
+      // No thread safety needed in the destructor anyway.
+      if (slot.state == State::kWaiting) {
+        callback_(slot.info);
+      }
+    }
+  }
+}
+
+void DelaySchedulerBase::Collect() {
+  static_assert(atomic_state_t::is_always_lock_free,
+                "Locks are unsafe to use with thread suspension");
+
+  for (auto& slot : deletion_queue_) {
+    TryCollectOne_impl(slot);
+  }
+}
+
+bool DelaySchedulerBase::TryCollectOne_impl(deletion_record_t& slot) {
+  auto now = clock::now();
+
+  auto state = State::kWaiting;
+  // Try to lock the waiting slot for reading the other fields.
+  if (slot.state.compare_exchange_strong(state, State::kDeleting,
+                                         std::memory_order_acq_rel)) {
+    if (now > slot.due) {
+      // The wait has elapsed: call back now and free the slot.
+      callback_(slot.info);
+      slot.info = nullptr;
+      slot.state.store(State::kFree, std::memory_order_release);
+      return true;
+    } else {
+      // Not due yet, hand the slot back to the waiting state.
+      slot.state.store(State::kWaiting, std::memory_order_release);
+    }
+  }
+  return false;
+}
+
+void DelaySchedulerBase::ScheduleAt_impl(
+    void* info, const clock::time_point& timeout_time) {
+  bool first_pass = true;
+
+  if (info == nullptr) {
+    return;
+  }
+
+  // Spin until a free slot is found; TryScheduleAt_impl also collects due
+  // items, so slots get reclaimed while we wait.
+  for (;;) {
+    if (TryScheduleAt_impl(info, timeout_time)) {
+      return;
+    }
+
+    if (first_pass) {
+      first_pass = false;
+      XELOGE(
+          "`DelayScheduler::ScheduleAt(...)` stalled: list is full! Find out "
+          "why or increase the queue size.");
+    }
+  }
+}
+
+bool DelaySchedulerBase::TryScheduleAt_impl(
+    void* info, const clock::time_point& timeout_time) {
+  if (info == nullptr) {
+    return false;
+  }
+
+  for (auto& slot : deletion_queue_) {
+    // Opportunistically collect the slot if its item is due.
+    TryCollectOne_impl(slot);
+
+    if (TryScheduleAtOne_impl(slot, info, timeout_time)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool DelaySchedulerBase::TryScheduleAtOne_impl(deletion_record_t& slot,
+                                               void* info,
+                                               clock::time_point due) {
+  auto state = State::kFree;
+  if (slot.state.compare_exchange_strong(state, State::kInitializing,
+                                         std::memory_order_acq_rel)) {
+    slot.info = info;
+    slot.due = due;
+    slot.state.store(State::kWaiting, std::memory_order_release);
+    return true;
+  }
+
+  return false;
+}
+
+}  // namespace xe::internal
\ No newline at end of file
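The queue above is driven purely by per-slot state transitions under `compare_exchange_strong`: whichever thread wins the exchange owns the slot until it publishes the next state with a release store. As a minimal standalone sketch of that claiming pattern (illustrative only, not part of the patch; `Slot` and `TryClaimForWrite` are made-up names):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

enum class State : std::uint_least8_t { kFree, kInitializing, kWaiting, kDeleting };

struct Slot {
  std::atomic<State> state{State::kFree};
  void* info = nullptr;
};

// Returns true if this thread won the race to fill the slot. Only the winner
// of the compare-exchange touches `info`, so no lock is required.
bool TryClaimForWrite(Slot& slot, void* info) {
  State expected = State::kFree;
  if (slot.state.compare_exchange_strong(expected, State::kInitializing,
                                         std::memory_order_acq_rel)) {
    slot.info = info;  // Exclusive access while in kInitializing.
    slot.state.store(State::kWaiting, std::memory_order_release);  // Publish.
    return true;
  }
  return false;  // Slot is owned by another thread; caller tries the next one.
}

int main() {
  Slot slot;
  int payload = 42;
  std::printf("first claim:  %d\n", TryClaimForWrite(slot, &payload));  // 1
  std::printf("second claim: %d\n", TryClaimForWrite(slot, &payload));  // 0
}
```

`TryScheduleAtOne_impl` and `TryCollectOne_impl` apply the same idea, with the extra `kWaiting`/`kDeleting` states and a due time attached to each slot.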
diff --git a/src/xenia/base/delay_scheduler.h b/src/xenia/base/delay_scheduler.h
new file mode 100644
index 000000000..33cf196c9
--- /dev/null
+++ b/src/xenia/base/delay_scheduler.h
@@ -0,0 +1,142 @@
+/**
+ ******************************************************************************
+ * Xenia : Xbox 360 Emulator Research Project                                 *
+ ******************************************************************************
+ * Copyright 2022 Ben Vanik. All rights reserved.                             *
+ * Released under the BSD license - see LICENSE in the root for more details. *
+ ******************************************************************************
+ */
+
+#ifndef XENIA_BASE_DELAY_SCHEDULER_H_
+#define XENIA_BASE_DELAY_SCHEDULER_H_
+
+#include <atomic>
+#include <chrono>
+#include <cstdint>
+#include <functional>
+#include <vector>
+
+namespace xe {
+
+namespace internal {
+// Non-templated base class holding the implementation, to reduce compile time
+// and code duplication.
+class DelaySchedulerBase {
+ protected:  // Types
+  enum class State : uint_least8_t {
+    kFree = 0,      // Slot is unused
+    kInitializing,  // Slot is reserved and currently being written by a thread
+    kWaiting,       // The slot contains a pointer scheduled for deletion
+    kDeleting       // A thread is currently deleting the pointer
+  };
+  using atomic_state_t = std::atomic<State>;
+  using clock = std::chrono::steady_clock;
+  struct deletion_record_t {
+    atomic_state_t state;
+    void* info;
+    clock::time_point due;
+  };
+  using deletion_queue_t = std::vector<deletion_record_t>;
+  using callback_t = std::function<void(void*)>;
+
+ public:
+  /// Check all scheduled items in the queue and invoke `callback(info);` for
+  /// any that are due. Call this to reliably collect all due wait items in
+  /// the queue.
+  void Collect();
+
+  size_t size() { return deletion_queue_.size(); }
+
+ protected:  // Implementation
+  DelaySchedulerBase(size_t queue_size, callback_t callback,
+                     bool call_on_destruct)
+      : deletion_queue_(queue_size),
+        callback_(callback),
+        call_on_destruct_(call_on_destruct) {}
+  virtual ~DelaySchedulerBase();
+
+  void ScheduleAt_impl(void* info, const clock::time_point& timeout_time);
+  [[nodiscard]] bool TryScheduleAt_impl(void* info,
+                                        const clock::time_point& timeout_time);
+
+  /// Checks whether the slot is due and, if so, calls back for it.
+  bool TryCollectOne_impl(deletion_record_t& slot);
+
+  [[nodiscard]] bool TryScheduleAtOne_impl(deletion_record_t& slot, void* info,
+                                           clock::time_point due);
+
+ private:
+  deletion_queue_t deletion_queue_;
+  callback_t callback_;
+  bool call_on_destruct_;
+};
+}  // namespace internal
+
+/// A lazy scheduler/timer.
+/// Waits at least the specified duration before invoking a callback, but may
+/// wait until it is destructed. Lock-free and thread-safe; `ScheduleAt` and
+/// `ScheduleAfter` spin if the wait queue is full (the `Try...` variants never
+/// block). Callbacks of due wait items may be invoked on any thread that calls
+/// any member.
+template <typename INFO>
+class DelayScheduler : internal::DelaySchedulerBase {
+ public:
+  DelayScheduler(size_t queue_size, std::function<void(INFO*)> callback,
+                 bool call_on_destruct)
+      : DelaySchedulerBase(
+            queue_size,
+            [callback](void* info) { callback(reinterpret_cast<INFO*>(info)); },
+            call_on_destruct) {}
+  DelayScheduler(const DelayScheduler&) = delete;
+  DelayScheduler& operator=(const DelayScheduler&) = delete;
+  virtual ~DelayScheduler() {}
+
+  // From base class (inheritance is private, so re-export explicitly):
+  using internal::DelaySchedulerBase::Collect;
+  using internal::DelaySchedulerBase::size;
+
+  /// Schedule an object for deletion at some point after `timeout_time` using
+  /// `callback(info);`. Opportunistically collects any due wait items it
+  /// encounters (which may be none or all of them); use `Collect()` to collect
+  /// all due wait items. Blocks until a free wait slot is found.
+  template <class Duration>
+  void ScheduleAt(
+      INFO* info,
+      const std::chrono::time_point<clock, Duration>& timeout_time) {
+    ScheduleAt(info,
+               std::chrono::time_point_cast<clock::duration>(timeout_time));
+  }
+  /// Like `ScheduleAt` but does not block on a full list.
+  template <class Duration>
+  [[nodiscard]] bool TryScheduleAt(
+      INFO* info,
+      const std::chrono::time_point<clock, Duration>& timeout_time) {
+    return TryScheduleAt(
+        info, std::chrono::time_point_cast<clock::duration>(timeout_time));
+  }
+
+  void ScheduleAt(INFO* info, const clock::time_point& timeout_time) {
+    ScheduleAt_impl(info, timeout_time);
+  }
+  [[nodiscard]] bool TryScheduleAt(INFO* info,
+                                   const clock::time_point& timeout_time) {
+    return TryScheduleAt_impl(info, timeout_time);
+  }
+
+  /// Schedule a callback at some point after `rel_time` has passed.
+  template <class Rep, class Period>
+  void ScheduleAfter(INFO* info,
+                     const std::chrono::duration<Rep, Period>& rel_time) {
+    ScheduleAt(info,
+               clock::now() + std::chrono::ceil<clock::duration>(rel_time));
+  }
+  /// Like `ScheduleAfter` but does not block.
+  template <class Rep, class Period>
+  [[nodiscard]] bool TryScheduleAfter(
+      INFO* info, const std::chrono::duration<Rep, Period>& rel_time) {
+    return TryScheduleAt(
+        info, clock::now() + std::chrono::ceil<clock::duration>(rel_time));
+  }
+};
+
+}  // namespace xe
+
+#endif  // XENIA_BASE_DELAY_SCHEDULER_H_
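For context, a hypothetical caller could look like the following sketch. `Foo`, `reaper`, the 64-slot queue and the 100 ms delay are illustrative and not taken from the patch or from existing Xenia call sites:

```cpp
#include <chrono>
#include <cstdio>

#include "xenia/base/delay_scheduler.h"

struct Foo {
  int id;
};

int main() {
  // 64 wait slots; the callback deletes the pointer once it becomes due;
  // `call_on_destruct` flushes whatever is still pending on destruction.
  xe::DelayScheduler<Foo> reaper(
      64,
      [](Foo* foo) {
        std::printf("deleting Foo %d\n", foo->id);
        delete foo;
      },
      /*call_on_destruct=*/true);

  // Non-blocking retirement: the object is freed some time after 100 ms.
  reaper.ScheduleAfter(new Foo{1}, std::chrono::milliseconds(100));

  // A periodically running thread (e.g. a main loop) sweeps the queue; any
  // Schedule* call may also collect due items it walks past.
  reaper.Collect();
}  // The destructor invokes the callback for items that never became due.
```

Note that nothing here creates a timer thread: callbacks only run from whichever thread happens to call into the scheduler, or from the destructor when `call_on_destruct` is true.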