#pragma once

#include <util/types.hpp>

#include <algorithm>
#include <concepts>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <memory>
#include <type_traits>
#include <vector>

#include "reverse_ptr.hpp"

namespace rsx
{
	namespace aligned_allocator
	{
		template <usz Align>
			requires (Align != 0) && ((Align & (Align - 1)) == 0)
		size_t align_up(size_t size)
		{
			return (size + (Align - 1)) & ~(Align - 1);
		}

		template <usz Align>
			requires (Align != 0) && ((Align & (Align - 1)) == 0)
		void* malloc(size_t size)
		{
#if defined(_WIN32)
			return _aligned_malloc(size, Align);
#elif defined(__APPLE__)
			// macOS requires the alignment to be at least sizeof(void*)
			constexpr size_t NativeAlign = std::max(Align, sizeof(void*));
			return std::aligned_alloc(NativeAlign, align_up<NativeAlign>(size));
#else
			// aligned_alloc requires the size to be a multiple of the alignment
			return std::aligned_alloc(Align, align_up<Align>(size));
#endif
		}

		template <usz Align>
			requires (Align != 0) && ((Align & (Align - 1)) == 0)
		void* realloc(void* prev_ptr, size_t prev_size, size_t new_size)
		{
			ensure(reinterpret_cast<uptr>(prev_ptr) % Align == 0, "Pointer not aligned to Align");

#if defined(_WIN32)
			// _aligned_malloc allocates exactly the requested size, so the previous
			// block cannot be reused based on align_up(prev_size); let _aligned_realloc
			// handle both shrinking and growing internally
			return _aligned_realloc(prev_ptr, new_size, Align);
#else
			if (align_up<Align>(prev_size) >= new_size)
			{
				// aligned_alloc sized the previous block up to an Align boundary; reuse it
				return prev_ptr;
			}

			// No aligned realloc on this platform; allocate, copy, then release the old block
#if defined(__APPLE__)
			constexpr size_t NativeAlign = std::max(Align, sizeof(void*));
			void* ret = std::aligned_alloc(NativeAlign, align_up<NativeAlign>(new_size));
#else
			void* ret = std::aligned_alloc(Align, align_up<Align>(new_size));
#endif
			std::memcpy(ret, prev_ptr, std::min(prev_size, new_size));
			std::free(prev_ptr);
			return ret;
#endif
		}

		static inline void free(void* ptr)
		{
#ifdef _WIN32
			_aligned_free(ptr);
#else
			std::free(ptr);
#endif
		}
	}
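	// Illustrative sketch (assumed usage, not part of the API): every block
	// obtained from this allocator must be resized and released with the same
	// Align on every call, e.g.:
	//
	//   void* p = aligned_allocator::malloc<16>(100);    // 16-byte-aligned block
	//   p = aligned_allocator::realloc<16>(p, 100, 256); // grow, contents preserved
	//   aligned_allocator::free(p);                      // matching release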
	template <typename C, typename T>
	concept span_like = requires(C& c)
	{
		{ c.data() } -> std::convertible_to<const T*>;
		{ c.size() } -> std::integral;
	};

	template <typename T, typename U>
	concept is_trivially_comparable_v = requires (T t1, U t2)
	{
		{ t1 == t2 } -> std::same_as<bool>;
	};

	template <typename Ty, usz Align = alignof(Ty)>
		requires std::is_trivially_destructible_v<Ty> && std::is_trivially_copyable_v<Ty>
	struct simple_array
	{
	public:
		using iterator = Ty*;
		using const_iterator = const Ty*;
		using reverse_iterator = reverse_pointer<Ty>;
		using const_reverse_iterator = reverse_pointer<const Ty>;
		using value_type = Ty;

	private:
		// Small-buffer optimization: at least one element, up to 64 bytes inline
		static constexpr u32 _local_capacity = std::max<u32>(64u / sizeof(Ty), 1u);
		alignas(Align) char _local_storage[_local_capacity * sizeof(Ty)];

		u32 _capacity = _local_capacity;
		Ty* _data = _local_capacity ? reinterpret_cast<Ty*>(_local_storage) : nullptr;
		u32 _size = 0;

		inline u32 offset(const_iterator pos)
		{
			return (_data) ? u32(pos - _data) : 0u;
		}

		bool is_local_storage() const
		{
			return _data == reinterpret_cast<const Ty*>(_local_storage);
		}

	public:
		simple_array() = default;

		simple_array(u32 initial_size)
		{
			reserve(initial_size);
			_size = initial_size;
		}

		simple_array(u32 initial_size, const Ty val)
		{
			reserve(initial_size);
			_size = initial_size;

			for (u32 n = 0; n < initial_size; ++n)
			{
				_data[n] = val;
			}
		}

		simple_array(const std::initializer_list<Ty>& args)
		{
			reserve(::size32(args));

			for (const auto& arg : args)
			{
				push_back(arg);
			}
		}

		simple_array(const simple_array& other)
		{
			resize(other._size);

			if (_size)
			{
				std::memcpy(_data, other._data, size_bytes());
			}
		}

		simple_array(simple_array&& other) noexcept
		{
			swap(other);
		}

		template <typename Container>
			requires span_like<Container, Ty>
		simple_array(const Container& container)
		{
			resize(container.size());

			if (_size)
			{
				std::memcpy(_data, container.data(), size_bytes());
			}
		}

		simple_array& operator=(const simple_array& other)
		{
			if (&other != this)
			{
				resize(other._size);

				if (_size)
				{
					std::memcpy(_data, other._data, size_bytes());
				}
			}

			return *this;
		}

		simple_array& operator=(simple_array&& other) noexcept
		{
			swap(other);
			return *this;
		}

		~simple_array()
		{
			if (_data)
			{
				if (!is_local_storage())
				{
					aligned_allocator::free(_data);
				}

				_data = nullptr;
				_size = _capacity = 0;
			}
		}

		void swap(simple_array& that) noexcept
		{
			if (!_size && !that._size)
			{
				// NOP. Surprisingly common
				return;
			}

			const auto _this_is_local = is_local_storage();
			const auto _that_is_local = that.is_local_storage();

			if (!_this_is_local && !_that_is_local)
			{
				// Both heap-allocated; swap the members directly
				std::swap(_capacity, that._capacity);
				std::swap(_size, that._size);
				std::swap(_data, that._data);
				return;
			}

			if (!_size)
			{
				*this = that;
				that.clear();
				return;
			}

			if (!that._size)
			{
				that = *this;
				clear();
				return;
			}

			if (_this_is_local != _that_is_local)
			{
				// Mismatched usage of the stack storage.
				simple_array tmp{ *this };
				*this = that;
				that = tmp;
				return;
			}

			// Both use local storage; use memcpy to allow compiler optimizations
			Ty _stack_alloc[_local_capacity];
			std::memcpy(_stack_alloc, that._data, that.size_bytes());
			std::memcpy(that._data, _data, size_bytes());
			std::memcpy(_data, _stack_alloc, that.size_bytes());
			std::swap(_size, that._size);
		}

		void reserve(u32 size)
		{
			if (_capacity >= size)
			{
				return;
			}

			if (is_local_storage())
			{
				// Switch to heap storage
				ensure(_data = static_cast<Ty*>(aligned_allocator::malloc<Align>(sizeof(Ty) * size)));
				std::memcpy(static_cast<void*>(_data), _local_storage, size_bytes());
			}
			else
			{
				// Extend heap storage
				ensure(_data = static_cast<Ty*>(aligned_allocator::realloc<Align>(_data, size_bytes(), sizeof(Ty) * size))); // "realloc() failed!"
			}

			_capacity = size;
		}

		template <typename T>
			requires UnsignedInt<T>
		void resize(T size)
		{
			const auto new_size = static_cast<u32>(size);
			reserve(new_size);
			_size = new_size;
		}

		void push_back(const Ty& val)
		{
			if (_size >= _capacity)
			{
				reserve(_capacity + 16);
			}

			_data[_size++] = val;
		}

		void push_back(Ty&& val)
		{
			if (_size >= _capacity)
			{
				reserve(_capacity + 16);
			}

			_data[_size++] = val;
		}

		template <typename... Args>
		void emplace_back(Args&&... args)
		{
			if (_size >= _capacity)
			{
				reserve(_capacity + 16);
			}

			std::construct_at(&_data[_size++], std::forward<Args>(args)...);
		}
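		// Illustrative sketch (assumed usage): elements start out in the inline
		// local buffer; capacity then grows in fixed steps of 16 once it is
		// exhausted, e.g.:
		//
		//   rsx::simple_array<u32> ids;
		//   ids.push_back(1);     // lands in the inline local storage
		//   ids.emplace_back(2u); // constructed in place at the tail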
		Ty pop_back()
		{
			return _data[--_size];
		}

		iterator insert(iterator pos, const Ty& val)
		{
			ensure(pos >= _data);
			const auto _loc = offset(pos);

			if (_size >= _capacity)
			{
				reserve(_capacity + 16);
				pos = _data + _loc;
			}

			if (_loc >= _size)
			{
				_data[_size++] = val;
				return pos;
			}

			AUDIT(_loc < _size);

			const u32 remaining = (_size - _loc);
			std::memmove(pos + 1, pos, remaining * sizeof(Ty));

			*pos = val;
			_size++;

			return pos;
		}

		iterator insert(iterator pos, Ty&& val)
		{
			ensure(pos >= _data);
			const auto _loc = offset(pos);

			if (_size >= _capacity)
			{
				reserve(_capacity + 16);
				pos = _data + _loc;
			}

			if (_loc >= _size)
			{
				_data[_size++] = val;
				return pos;
			}

			AUDIT(_loc < _size);

			const u32 remaining = (_size - _loc);
			std::memmove(pos + 1, pos, remaining * sizeof(Ty));

			*pos = val;
			_size++;

			return pos;
		}

		void operator += (const simple_array& that)
		{
			const auto old_size = _size;
			resize(_size + that._size);
			std::memcpy(data() + old_size, that.data(), that.size_bytes());
		}

		void clear()
		{
			_size = 0;
		}

		bool empty() const
		{
			return _size == 0;
		}

		u32 size() const
		{
			return _size;
		}

		u64 size_bytes() const
		{
			return _size * sizeof(Ty);
		}

		u32 size_bytes32() const
		{
			return static_cast<u32>(_size * sizeof(Ty));
		}

		u32 capacity() const
		{
			return _capacity;
		}

		Ty& operator[] (u32 index)
		{
			return _data[index];
		}

		const Ty& operator[] (u32 index) const
		{
			return _data[index];
		}

		Ty* data()
		{
			return _data;
		}

		const Ty* data() const
		{
			return _data;
		}

		Ty& back()
		{
			return _data[_size - 1];
		}

		const Ty& back() const
		{
			return _data[_size - 1];
		}

		Ty& front()
		{
			return _data[0];
		}

		const Ty& front() const
		{
			return _data[0];
		}

		iterator begin()
		{
			return _data;
		}

		iterator end()
		{
			return _data ? _data + _size : nullptr;
		}

		const_iterator begin() const
		{
			return _data;
		}

		const_iterator end() const
		{
			return _data ? _data + _size : nullptr;
		}

		const_iterator cbegin() const
		{
			return _data;
		}

		const_iterator cend() const
		{
			return _data ? _data + _size : nullptr;
		}

		reverse_iterator rbegin()
		{
			return reverse_iterator(end() - 1);
		}

		reverse_iterator rend()
		{
			return reverse_iterator(begin() - 1);
		}

		const_reverse_iterator rbegin() const
		{
			return crbegin();
		}

		const_reverse_iterator rend() const
		{
			return crend();
		}

		const_reverse_iterator crbegin() const
		{
			return const_reverse_iterator(cend() - 1);
		}

		const_reverse_iterator crend() const
		{
			return const_reverse_iterator(cbegin() - 1);
		}

		bool any(std::predicate<const Ty&> auto predicate) const
		{
			for (auto it = begin(); it != end(); ++it)
			{
				if (std::invoke(predicate, *it))
				{
					return true;
				}
			}

			return false;
		}
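		// Illustrative sketch (assumed usage): any() short-circuits on the first match, e.g.:
		//
		//   rsx::simple_array<u32> arr = { 1, 2, 3 };
		//   const bool has_even = arr.any([](const u32& v) { return (v % 2) == 0; }); // true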
		/**
		 * Note that find and find_if return pointers to objects rather than iterators, for simplified usage.
		 * For all practical uses of this container the two conventions are functionally equivalent:
		 * a nullptr result means "not found", whether the container is empty or the value is simply absent.
		 */
		template <typename T>
			requires is_trivially_comparable_v<Ty, T>
		Ty* find(const T& value)
		{
			for (auto it = begin(); it != end(); ++it)
			{
				if (*it == value)
				{
					return &(*it);
				}
			}

			return nullptr;
		}

		// Remove when we switch to C++23
		template <typename T>
			requires is_trivially_comparable_v<Ty, T>
		const Ty* find(const T& value) const
		{
			return const_cast<simple_array<Ty, Align>*>(this)->find(value);
		}

		Ty* find_if(std::predicate<const Ty&> auto predicate)
		{
			for (auto it = begin(); it != end(); ++it)
			{
				if (std::invoke(predicate, *it))
				{
					return &(*it);
				}
			}

			return nullptr;
		}

		// Remove with C++23
		const Ty* find_if(std::predicate<const Ty&> auto predicate) const
		{
			return const_cast<simple_array<Ty, Align>*>(this)->find_if(predicate);
		}

		bool erase_if(std::predicate<const Ty&> auto predicate)
		{
			if (!_size)
			{
				return false;
			}

			bool ret = false;
			for (auto ptr = _data, last = _data + _size - 1; ptr <= last;)
			{
				if (predicate(*ptr))
				{
					ret = true;

					if (ptr == last)
					{
						// Popping the last entry from list. Just set the new size and exit
						_size--;
						break;
					}

					// Move the tail item into this slot and shrink by 1
					std::memcpy(ptr, last, sizeof(Ty));
					_size--;
					last--;

					// Retest the same ptr which now holds the previous tail item
					continue;
				}

				ptr++;
			}

			return ret;
		}

		simple_array& sort(std::predicate<const Ty&, const Ty&> auto predicate)
		{
			if (_size < 2)
			{
				return *this;
			}

			std::sort(begin(), end(), predicate);
			return *this;
		}

		template <typename F, typename U = std::invoke_result_t<F, const Ty&>>
			requires (std::is_invocable_v<F, const Ty&> && std::is_trivially_destructible_v<U>)
		simple_array<U> map(F&& xform) const
		{
			simple_array<U> result;
			result.reserve(size());

			for (auto it = begin(); it != end(); ++it)
			{
				result.push_back(xform(*it));
			}

			return result;
		}

		template <typename F, typename U = std::invoke_result_t<F, const Ty&>>
			requires (std::is_invocable_v<F, const Ty&> && !std::is_trivially_destructible_v<U>)
		std::vector<U> map(F&& xform) const
		{
			std::vector<U> result;
			result.reserve(size());

			for (auto it = begin(); it != end(); ++it)
			{
				result.push_back(xform(*it));
			}

			return result;
		}

		template <typename U, typename F>
			requires std::is_invocable_r_v<U, F, const U&, const Ty&>
		U reduce(U initial_value, F&& reducer) const
		{
			U accumulate = initial_value;

			for (auto it = begin(); it != end(); ++it)
			{
				accumulate = reducer(accumulate, *it);
			}

			return accumulate;
		}
	};
}
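// Illustrative sketch (assumed usage, not part of the header): the functional
// helpers above compose naturally, e.g.:
//
//   rsx::simple_array<u32> arr = { 3, 1, 2 };
//   arr.erase_if([](const u32& v) { return v == 1; });                 // unordered removal
//   const auto doubled = arr.map([](const u32& v) { return v * 2; });  // rsx::simple_array<u32>
//   const u32 sum = doubled.reduce(0u, [](const u32& a, const u32& v) { return a + v; });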