Add usz alias for std::size_t

This commit is contained in:
Nekotekina 2020-12-18 10:39:54 +03:00
parent 360c4d1554
commit fb29933d3d
173 changed files with 718 additions and 717 deletions

View file

@@ -286,7 +286,7 @@ namespace utils
}
// Synchronization helper (cache-friendly busy waiting)
inline void busy_wait(std::size_t cycles = 3000)
inline void busy_wait(usz cycles = 3000)
{
const u64 start = __rdtsc();
do _mm_pause();

View file

@@ -22,7 +22,7 @@
#include "endian.hpp"
// Total number of entries.
static constexpr std::size_t s_hashtable_size = 1u << 17;
static constexpr usz s_hashtable_size = 1u << 17;
// Reference counter combined with shifted pointer (which is assumed to be 47 bit)
static constexpr uptr s_ref_mask = (1u << 17) - 1;

View file

@@ -265,7 +265,7 @@ namespace atomic_wait
struct atomic_wait_engine
{
private:
template <typename T, std::size_t Align>
template <typename T, usz Align>
friend class atomic_t;
template <uint Max, typename... T>
@@ -304,7 +304,7 @@ void atomic_wait::list<Max, T...>::wait(atomic_wait_timeout timeout)
}
// Helper class, provides access to compiler-specific atomic intrinsics
template <typename T, std::size_t Size = sizeof(T)>
template <typename T, usz Size = sizeof(T)>
struct atomic_storage
{
/* First part: Non-MSVC intrinsics */
@@ -1036,7 +1036,7 @@ struct atomic_storage<T, 16> : atomic_storage<T, 0>
};
// Atomic type with lock-free and standard layout guarantees (and appropriate limitations)
template <typename T, std::size_t Align = sizeof(T)>
template <typename T, usz Align = sizeof(T)>
class atomic_t
{
protected:
@@ -1058,7 +1058,7 @@ protected:
alignas(Align) type m_data;
public:
static constexpr std::size_t align = Align;
static constexpr usz align = Align;
atomic_t() noexcept = default;
@@ -1631,13 +1631,13 @@ public:
}
};
template <std::size_t Align>
template <usz Align>
class atomic_t<bool, Align> : private atomic_t<uchar, Align>
{
using base = atomic_t<uchar, Align>;
public:
static constexpr std::size_t align = Align;
static constexpr usz align = Align;
using simple_type = bool;
@@ -1723,6 +1723,6 @@ public:
namespace atomic_wait
{
template <std::size_t Align>
template <usz Align>
inline __m128i default_mask<atomic_t<bool, Align>> = _mm_cvtsi32_si128(1);
}

View file

@@ -4,7 +4,7 @@
namespace stx
{
template <typename T, std::size_t Align = alignof(T), std::size_t Size = sizeof(T)>
template <typename T, usz Align = alignof(T), usz Size = sizeof(T)>
struct se_storage
{
struct type8
@@ -86,7 +86,7 @@ namespace stx
}
};
template <typename T, std::size_t Align, std::size_t Size>
template <typename T, usz Align, usz Size>
constexpr typename se_storage<T, Align, Size>::type se_storage<T, Align, Size>::swap(const type& src) noexcept
{
// Try to keep u16/u32/u64 optimizations at the cost of more bitcasts
@@ -112,7 +112,7 @@ namespace stx
type64 dst{};
// Swap u64 blocks
for (std::size_t i = 0; i < sizeof(T) / 8; i++)
for (usz i = 0; i < sizeof(T) / 8; i++)
{
dst.data[i] = se_storage<u64>::swap(tmp.data[sizeof(T) / 8 - 1 - i]);
}
@@ -124,7 +124,7 @@ namespace stx
type dst{};
// Swap by moving every byte
for (std::size_t i = 0; i < sizeof(T); i++)
for (usz i = 0; i < sizeof(T); i++)
{
dst.data[i] = src.data[sizeof(T) - 1 - i];
}
@@ -134,7 +134,7 @@ namespace stx
}
// Endianness support template
template <typename T, bool Swap, std::size_t Align = alignof(T)>
template <typename T, bool Swap, usz Align = alignof(T)>
class alignas(Align) se_t
{
using type = typename std::remove_cv<T>::type;
@@ -246,7 +246,7 @@ private:
return std::bit_cast<To>(static_cast<se_t<To, Swap>>(rhs));
}
template <typename To, typename Test = int, typename R, std::size_t Align2>
template <typename To, typename Test = int, typename R, usz Align2>
static constexpr To right_arg_cast(const se_t<R, Swap, Align2>& rhs) noexcept
{
if constexpr ((std::is_integral_v<R> || std::is_enum_v<R>) && std::is_convertible_v<R, Test> && sizeof(R) == sizeof(T))

View file

@@ -85,7 +85,7 @@ namespace logs
virtual ~file_writer();
// Append raw data
void log(const char* text, std::size_t size);
void log(const char* text, usz size);
};
struct file_listener final : file_writer, public listener
@@ -326,7 +326,7 @@ void logs::message::broadcast(const char* fmt, const fmt_type_info* sup, ...) co
static constexpr fmt_type_info empty_sup{};
std::size_t args_count = 0;
usz args_count = 0;
for (auto v = sup; v && v->fmt_string; v++)
args_count++;
@@ -538,7 +538,7 @@ bool logs::file_writer::flush(u64 bufv)
return false;
}
void logs::file_writer::log(const char* text, std::size_t size)
void logs::file_writer::log(const char* text, usz size)
{
if (!m_fptr)
{

View file

@@ -87,7 +87,7 @@ namespace logs
#define GEN_LOG_METHOD(_sev)\
const message msg_##_sev{this, level::_sev};\
template <typename CharT, std::size_t N, typename... Args>\
template <typename CharT, usz N, typename... Args>\
void _sev(const CharT(&fmt)[N], const Args&... args)\
{\
if (level::_sev <= enabled.observe()) [[unlikely]]\

View file

@@ -72,15 +72,15 @@ namespace stx
atomic_t<void (*)(shared_counter* _this) noexcept> destroy{};
// Reference counter
atomic_t<std::size_t> refs{1};
atomic_t<usz> refs{1};
};
template <std::size_t Size, std::size_t Align, typename = void>
template <usz Size, usz Align, typename = void>
struct align_filler
{
};
template <std::size_t Size, std::size_t Align>
template <usz Size, usz Align>
struct align_filler<Size, Align, std::enable_if_t<(Align > Size)>>
{
char dummy[Align - Size];
@@ -104,10 +104,10 @@ namespace stx
};
template <typename T>
class alignas(T) shared_data<T[]> final : align_filler<sizeof(shared_counter) + sizeof(std::size_t), alignof(T)>
class alignas(T) shared_data<T[]> final : align_filler<sizeof(shared_counter) + sizeof(usz), alignof(T)>
{
public:
std::size_t m_count;
usz m_count;
shared_counter m_ctr;
@@ -301,13 +301,13 @@ namespace stx
}
template <typename T, bool Init = true>
static std::enable_if_t<std::is_unbounded_array_v<T>, single_ptr<T>> make_single(std::size_t count) noexcept
static std::enable_if_t<std::is_unbounded_array_v<T>, single_ptr<T>> make_single(usz count) noexcept
{
static_assert(sizeof(shared_data<T>) - offsetof(shared_data<T>, m_ctr) == sizeof(shared_counter));
using etype = std::remove_extent_t<T>;
const std::size_t size = sizeof(shared_data<T>) + count * sizeof(etype);
const usz size = sizeof(shared_data<T>) + count * sizeof(etype);
std::byte* bytes = nullptr;
@@ -535,7 +535,7 @@ namespace stx
}
}
std::size_t use_count() const noexcept
usz use_count() const noexcept
{
if (m_ptr)
{
@@ -588,7 +588,7 @@ namespace stx
}
template <typename T, bool Init = true>
static std::enable_if_t<std::is_unbounded_array_v<T>, shared_ptr<T>> make_shared(std::size_t count) noexcept
static std::enable_if_t<std::is_unbounded_array_v<T>, shared_ptr<T>> make_shared(usz count) noexcept
{
return make_single<T, Init>(count);
}

View file

@@ -64,7 +64,7 @@ struct to_se<T[], Se>
using type = typename to_se<T, Se>::type[];
};
template <typename T, bool Se, std::size_t N>
template <typename T, bool Se, usz N>
struct to_se<T[N], Se>
{
// Move array qualifier

View file

@@ -96,6 +96,7 @@ using u8 = std::uint8_t;
using u16 = std::uint16_t;
using u32 = std::uint32_t;
using u64 = std::uint64_t;
using usz = std::size_t;
using s8 = std::int8_t;
using s16 = std::int16_t;
@@ -150,7 +151,7 @@ namespace std
#endif
// Get integral type from type size
template <std::size_t N>
template <usz N>
struct get_int_impl
{
};
@@ -183,10 +184,10 @@ struct get_int_impl<sizeof(u64)>
using stype = s64;
};
template <std::size_t N>
template <usz N>
using get_uint_t = typename get_int_impl<N>::utype;
template <std::size_t N>
template <usz N>
using get_sint_t = typename get_int_impl<N>::stype;
template <typename T>
@@ -210,29 +211,29 @@ namespace fmt
const fmt_type_info* get_type_info();
}
template <typename T, std::size_t Align>
template <typename T, usz Align>
class atomic_t;
namespace stx
{
template <typename T, bool Se, std::size_t Align>
template <typename T, bool Se, usz Align>
class se_t;
}
using stx::se_t;
// se_t<> with native endianness
template <typename T, std::size_t Align = alignof(T)>
template <typename T, usz Align = alignof(T)>
using nse_t = se_t<T, false, Align>;
template <typename T, std::size_t Align = alignof(T)>
template <typename T, usz Align = alignof(T)>
using be_t = se_t<T, std::endian::little == std::endian::native, Align>;
template <typename T, std::size_t Align = alignof(T)>
template <typename T, usz Align = alignof(T)>
using le_t = se_t<T, std::endian::big == std::endian::native, Align>;
template <typename T, std::size_t Align = alignof(T)>
template <typename T, usz Align = alignof(T)>
using atomic_be_t = atomic_t<be_t<T>, Align>;
template <typename T, std::size_t Align = alignof(T)>
template <typename T, usz Align = alignof(T)>
using atomic_le_t = atomic_t<le_t<T>, Align>;
// Extract T::simple_type if available, remove cv qualifiers
@@ -625,7 +626,7 @@ inline u32 offset32(T T2::*const mptr)
#ifdef _MSC_VER
return std::bit_cast<u32>(mptr);
#elif __GNUG__
return std::bit_cast<std::size_t>(mptr);
return std::bit_cast<usz>(mptr);
#else
static_assert(sizeof(mptr) == 0, "Unsupported pointer-to-member size");
#endif
@@ -643,7 +644,7 @@ struct offset32_array
}
};
template <typename T, std::size_t N>
template <typename T, usz N>
struct offset32_array<std::array<T, N>>
{
template <typename Arg>
@@ -689,7 +690,7 @@ constexpr u32 to_u8(char c)
}
// Convert 1-2-byte string to u16 value like reinterpret_cast does
constexpr u16 operator""_u16(const char* s, std::size_t /*length*/)
constexpr u16 operator""_u16(const char* s, usz /*length*/)
{
if constexpr (std::endian::little == std::endian::native)
{
@@ -702,7 +703,7 @@ constexpr u16 operator""_u16(const char* s, std::size_t /*length*/)
}
// Convert 3-4-byte string to u32 value like reinterpret_cast does
constexpr u32 operator""_u32(const char* s, std::size_t /*length*/)
constexpr u32 operator""_u32(const char* s, usz /*length*/)
{
if constexpr (std::endian::little == std::endian::native)
{
@@ -715,7 +716,7 @@ constexpr u32 operator""_u32(const char* s, std::size_t /*length*/)
}
// Convert 5-6-byte string to u64 value like reinterpret_cast does
constexpr u64 operator""_u48(const char* s, std::size_t /*length*/)
constexpr u64 operator""_u48(const char* s, usz /*length*/)
{
if constexpr (std::endian::little == std::endian::native)
{
@@ -728,7 +729,7 @@ constexpr u64 operator""_u48(const char* s, std::size_t /*length*/)
}
// Convert 7-8-byte string to u64 value like reinterpret_cast does
constexpr u64 operator""_u64(const char* s, std::size_t /*length*/)
constexpr u64 operator""_u64(const char* s, usz /*length*/)
{
if constexpr (std::endian::little == std::endian::native)
{
@@ -877,7 +878,7 @@ template <typename CT, typename = decltype(static_cast<u32>(std::declval<CT>().s
}
// Returns u32 size for an array
template <typename T, std::size_t Size>
template <typename T, usz Size>
[[nodiscard]] constexpr u32 size32(const T (&)[Size])
{
static_assert(Size < UINT32_MAX, "Array is too big for 32-bit");
@@ -885,20 +886,20 @@ template <typename T, std::size_t Size>
}
// Simplified hash algorithm for pointers. May be used in std::unordered_(map|set).
template <typename T, std::size_t Align = alignof(T)>
template <typename T, usz Align = alignof(T)>
struct pointer_hash
{
std::size_t operator()(T* ptr) const
usz operator()(T* ptr) const
{
return reinterpret_cast<uptr>(ptr) / Align;
}
};
template <typename T, std::size_t Shift = 0>
template <typename T, usz Shift = 0>
struct value_hash
{
std::size_t operator()(T value) const
usz operator()(T value) const
{
return static_cast<std::size_t>(value) >> Shift;
return static_cast<usz>(value) >> Shift;
}
};

View file

@@ -9,26 +9,26 @@ union alignas(16) v128
uchar _bytes[16];
char _chars[16];
template <typename T, std::size_t N, std::size_t M>
template <typename T, usz N, usz M>
struct masked_array_t // array type accessed as (index ^ M)
{
char m_data[16];
public:
T& operator[](std::size_t index)
T& operator[](usz index)
{
return reinterpret_cast<T*>(m_data)[index ^ M];
}
const T& operator[](std::size_t index) const
const T& operator[](usz index) const
{
return reinterpret_cast<const T*>(m_data)[index ^ M];
}
};
template <typename T, std::size_t N = 16 / sizeof(T)>
template <typename T, usz N = 16 / sizeof(T)>
using normal_array_t = masked_array_t<T, N, std::endian::little == std::endian::native ? 0 : N - 1>;
template <typename T, std::size_t N = 16 / sizeof(T)>
template <typename T, usz N = 16 / sizeof(T)>
using reversed_array_t = masked_array_t<T, N, std::endian::little == std::endian::native ? N - 1 : 0>;
normal_array_t<u64> _u64;
@@ -218,7 +218,7 @@ union alignas(16) v128
}
// Unaligned load with optional index offset
static v128 loadu(const void* ptr, std::size_t index = 0)
static v128 loadu(const void* ptr, usz index = 0)
{
v128 ret;
std::memcpy(&ret, static_cast<const u8*>(ptr) + index * sizeof(v128), sizeof(v128));
@@ -226,7 +226,7 @@ union alignas(16) v128
}
// Unaligned store with optional index offset
static void storeu(v128 value, void* ptr, std::size_t index = 0)
static void storeu(v128 value, void* ptr, usz index = 0)
{
std::memcpy(static_cast<u8*>(ptr) + index * sizeof(v128), &value, sizeof(v128));
}
@@ -368,7 +368,7 @@ union alignas(16) v128
}
};
template <typename T, std::size_t N, std::size_t M>
template <typename T, usz N, usz M>
struct offset32_array<v128::masked_array_t<T, N, M>>
{
template <typename Arg>

View file

@@ -18,29 +18,29 @@ namespace utils
* Reserve `size` bytes of virtual memory and returns it.
* The memory should be commited before usage.
*/
void* memory_reserve(std::size_t size, void* use_addr = nullptr);
void* memory_reserve(usz size, void* use_addr = nullptr);
/**
* Commit `size` bytes of virtual memory starting at pointer.
* That is, bake reserved memory with physical memory.
* pointer should belong to a range of reserved memory.
*/
void memory_commit(void* pointer, std::size_t size, protection prot = protection::rw);
void memory_commit(void* pointer, usz size, protection prot = protection::rw);
// Decommit all memory committed via commit_page_memory.
void memory_decommit(void* pointer, std::size_t size);
void memory_decommit(void* pointer, usz size);
// Decommit all memory and commit it again.
void memory_reset(void* pointer, std::size_t size, protection prot = protection::rw);
void memory_reset(void* pointer, usz size, protection prot = protection::rw);
// Free memory after reserved by memory_reserve, should specify original size
void memory_release(void* pointer, std::size_t size);
void memory_release(void* pointer, usz size);
// Set memory protection
void memory_protect(void* pointer, std::size_t size, protection prot);
void memory_protect(void* pointer, usz size, protection prot);
// Lock pages in memory
bool memory_lock(void* pointer, std::size_t size);
bool memory_lock(void* pointer, usz size);
// Shared memory handle
class shm

View file

@@ -65,7 +65,7 @@ namespace utils
return _prot;
}
void* memory_reserve(std::size_t size, void* use_addr)
void* memory_reserve(usz size, void* use_addr)
{
#ifdef _WIN32
return ::VirtualAlloc(use_addr, size, MEM_RESERVE, PAGE_NOACCESS);
@@ -119,7 +119,7 @@ namespace utils
#endif
}
void memory_commit(void* pointer, std::size_t size, protection prot)
void memory_commit(void* pointer, usz size, protection prot)
{
#ifdef _WIN32
ensure(::VirtualAlloc(pointer, size, MEM_COMMIT, +prot));
@@ -130,7 +130,7 @@ namespace utils
#endif
}
void memory_decommit(void* pointer, std::size_t size)
void memory_decommit(void* pointer, usz size)
{
#ifdef _WIN32
ensure(::VirtualFree(pointer, size, MEM_DECOMMIT));
@@ -145,7 +145,7 @@ namespace utils
#endif
}
void memory_reset(void* pointer, std::size_t size, protection prot)
void memory_reset(void* pointer, usz size, protection prot)
{
#ifdef _WIN32
memory_decommit(pointer, size);
@@ -167,7 +167,7 @@ namespace utils
#endif
}
void memory_release(void* pointer, std::size_t size)
void memory_release(void* pointer, usz size)
{
#ifdef _WIN32
ensure(::VirtualFree(pointer, 0, MEM_RELEASE));
@@ -176,7 +176,7 @@ namespace utils
#endif
}
void memory_protect(void* pointer, std::size_t size, protection prot)
void memory_protect(void* pointer, usz size, protection prot)
{
#ifdef _WIN32
for (u64 addr = reinterpret_cast<u64>(pointer), end = addr + size; addr < end;)
@@ -199,7 +199,7 @@ namespace utils
#endif
}
bool memory_lock(void* pointer, std::size_t size)
bool memory_lock(void* pointer, usz size)
{
#ifdef _WIN32
return ::VirtualLock(pointer, size);