Attribute macro changed

__forceinline -> force_inline
__noinline -> never_inline
printf_alike(x,y) added
This commit is contained in:
Nekotekina 2015-05-28 18:14:22 +03:00
parent f83306b0bf
commit 78fdcf75e7
32 changed files with 395 additions and 393 deletions

View file

@@ -2,7 +2,7 @@
#define IS_LE_MACHINE
union _CRT_ALIGN(16) u128
union u128
{
u64 _u64[2];
s64 _s64[2];
@@ -107,12 +107,12 @@ union _CRT_ALIGN(16) u128
{
}
__forceinline operator bool() const
force_inline operator bool() const
{
return (data & mask) != 0;
}
__forceinline bit_element& operator = (const bool right)
force_inline bit_element& operator = (const bool right)
{
if (right)
{
@@ -125,7 +125,7 @@ union _CRT_ALIGN(16) u128
return *this;
}
__forceinline bit_element& operator = (const bit_element& right)
force_inline bit_element& operator = (const bit_element& right)
{
if (right)
{
@@ -249,77 +249,77 @@ union _CRT_ALIGN(16) u128
return ret;
}
static __forceinline u128 add8(const u128& left, const u128& right)
static force_inline u128 add8(const u128& left, const u128& right)
{
return fromV(_mm_add_epi8(left.vi, right.vi));
}
static __forceinline u128 add16(const u128& left, const u128& right)
static force_inline u128 add16(const u128& left, const u128& right)
{
return fromV(_mm_add_epi16(left.vi, right.vi));
}
static __forceinline u128 add32(const u128& left, const u128& right)
static force_inline u128 add32(const u128& left, const u128& right)
{
return fromV(_mm_add_epi32(left.vi, right.vi));
}
static __forceinline u128 addfs(const u128& left, const u128& right)
static force_inline u128 addfs(const u128& left, const u128& right)
{
return fromF(_mm_add_ps(left.vf, right.vf));
}
static __forceinline u128 addfd(const u128& left, const u128& right)
static force_inline u128 addfd(const u128& left, const u128& right)
{
return fromD(_mm_add_pd(left.vd, right.vd));
}
static __forceinline u128 sub8(const u128& left, const u128& right)
static force_inline u128 sub8(const u128& left, const u128& right)
{
return fromV(_mm_sub_epi8(left.vi, right.vi));
}
static __forceinline u128 sub16(const u128& left, const u128& right)
static force_inline u128 sub16(const u128& left, const u128& right)
{
return fromV(_mm_sub_epi16(left.vi, right.vi));
}
static __forceinline u128 sub32(const u128& left, const u128& right)
static force_inline u128 sub32(const u128& left, const u128& right)
{
return fromV(_mm_sub_epi32(left.vi, right.vi));
}
static __forceinline u128 subfs(const u128& left, const u128& right)
static force_inline u128 subfs(const u128& left, const u128& right)
{
return fromF(_mm_sub_ps(left.vf, right.vf));
}
static __forceinline u128 subfd(const u128& left, const u128& right)
static force_inline u128 subfd(const u128& left, const u128& right)
{
return fromD(_mm_sub_pd(left.vd, right.vd));
}
static __forceinline u128 maxu8(const u128& left, const u128& right)
static force_inline u128 maxu8(const u128& left, const u128& right)
{
return fromV(_mm_max_epu8(left.vi, right.vi));
}
static __forceinline u128 minu8(const u128& left, const u128& right)
static force_inline u128 minu8(const u128& left, const u128& right)
{
return fromV(_mm_min_epu8(left.vi, right.vi));
}
static __forceinline u128 eq8(const u128& left, const u128& right)
static force_inline u128 eq8(const u128& left, const u128& right)
{
return fromV(_mm_cmpeq_epi8(left.vi, right.vi));
}
static __forceinline u128 eq16(const u128& left, const u128& right)
static force_inline u128 eq16(const u128& left, const u128& right)
{
return fromV(_mm_cmpeq_epi16(left.vi, right.vi));
}
static __forceinline u128 eq32(const u128& left, const u128& right)
static force_inline u128 eq32(const u128& left, const u128& right)
{
return fromV(_mm_cmpeq_epi32(left.vi, right.vi));
}
@@ -334,17 +334,17 @@ union _CRT_ALIGN(16) u128
return (_u64[0] != right._u64[0]) || (_u64[1] != right._u64[1]);
}
__forceinline u128 operator | (const u128& right) const
force_inline u128 operator | (const u128& right) const
{
return fromV(_mm_or_si128(vi, right.vi));
}
__forceinline u128 operator & (const u128& right) const
force_inline u128 operator & (const u128& right) const
{
return fromV(_mm_and_si128(vi, right.vi));
}
__forceinline u128 operator ^ (const u128& right) const
force_inline u128 operator ^ (const u128& right) const
{
return fromV(_mm_xor_si128(vi, right.vi));
}
@@ -354,18 +354,18 @@ union _CRT_ALIGN(16) u128
return from64(~_u64[0], ~_u64[1]);
}
__forceinline bool is_any_1() const // check if any bit is 1
force_inline bool is_any_1() const // check if any bit is 1
{
return _u64[0] || _u64[1];
}
__forceinline bool is_any_0() const // check if any bit is 0
force_inline bool is_any_0() const // check if any bit is 0
{
return ~_u64[0] || ~_u64[1];
}
// result = (~left) & (right)
static __forceinline u128 andnot(const u128& left, const u128& right)
static force_inline u128 andnot(const u128& left, const u128& right)
{
return fromV(_mm_andnot_si128(left.vi, right.vi));
}
@@ -379,7 +379,7 @@ union _CRT_ALIGN(16) u128
std::string to_xyzw() const;
static __forceinline u128 byteswap(const u128 val)
static force_inline u128 byteswap(const u128 val)
{
u128 ret;
ret._u64[0] = _byteswap_uint64(val._u64[1]);
@@ -388,7 +388,9 @@ union _CRT_ALIGN(16) u128
}
};
static __forceinline u128 sync_val_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
static_assert(__alignof(u128) == 16 && sizeof(u128) == 16, "Wrong u128 size or alignment");
static force_inline u128 sync_val_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
#if !defined(_MSC_VER)
auto res = __sync_val_compare_and_swap((volatile __int128_t*)dest, (__int128_t&)comp, (__int128_t&)exch);
@@ -399,7 +401,7 @@ static __forceinline u128 sync_val_compare_and_swap(volatile u128* dest, u128 co
#endif
}
static __forceinline bool sync_bool_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
static force_inline bool sync_bool_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
#if !defined(_MSC_VER)
return __sync_bool_compare_and_swap((volatile __int128_t*)dest, (__int128_t&)comp, (__int128_t&)exch);
@@ -408,7 +410,7 @@ static __forceinline bool sync_bool_compare_and_swap(volatile u128* dest, u128 c
#endif
}
static __forceinline u128 sync_lock_test_and_set(volatile u128* dest, u128 value)
static force_inline u128 sync_lock_test_and_set(volatile u128* dest, u128 value)
{
while (true)
{
@@ -417,7 +419,7 @@ static __forceinline u128 sync_lock_test_and_set(volatile u128* dest, u128 value
}
}
static __forceinline u128 sync_fetch_and_or(volatile u128* dest, u128 value)
static force_inline u128 sync_fetch_and_or(volatile u128* dest, u128 value)
{
while (true)
{
@@ -426,7 +428,7 @@ static __forceinline u128 sync_fetch_and_or(volatile u128* dest, u128 value)
}
}
static __forceinline u128 sync_fetch_and_and(volatile u128* dest, u128 value)
static force_inline u128 sync_fetch_and_and(volatile u128* dest, u128 value)
{
while (true)
{
@@ -435,7 +437,7 @@ static __forceinline u128 sync_fetch_and_and(volatile u128* dest, u128 value)
}
}
static __forceinline u128 sync_fetch_and_xor(volatile u128* dest, u128 value)
static force_inline u128 sync_fetch_and_xor(volatile u128* dest, u128 value)
{
while (true)
{
@@ -453,12 +455,12 @@ template<typename T, int size = sizeof(T)> struct se_t;
template<typename T> struct se_t<T, 1>
{
static __forceinline u8 to_be(const T& src)
static force_inline u8 to_be(const T& src)
{
return (u8&)src;
}
static __forceinline T from_be(const u8 src)
static force_inline T from_be(const u8 src)
{
return (T&)src;
}
@@ -466,12 +468,12 @@ template<typename T> struct se_t<T, 1>
template<typename T> struct se_t<T, 2>
{
static __forceinline u16 to_be(const T& src)
static force_inline u16 to_be(const T& src)
{
return _byteswap_ushort((u16&)src);
}
static __forceinline T from_be(const u16 src)
static force_inline T from_be(const u16 src)
{
const u16 res = _byteswap_ushort(src);
return (T&)res;
@@ -480,12 +482,12 @@ template<typename T> struct se_t<T, 2>
template<typename T> struct se_t<T, 4>
{
static __forceinline u32 to_be(const T& src)
static force_inline u32 to_be(const T& src)
{
return _byteswap_ulong((u32&)src);
}
static __forceinline T from_be(const u32 src)
static force_inline T from_be(const u32 src)
{
const u32 res = _byteswap_ulong(src);
return (T&)res;
@@ -494,12 +496,12 @@ template<typename T> struct se_t<T, 4>
template<typename T> struct se_t<T, 8>
{
static __forceinline u64 to_be(const T& src)
static force_inline u64 to_be(const T& src)
{
return _byteswap_uint64((u64&)src);
}
static __forceinline T from_be(const u64 src)
static force_inline T from_be(const u64 src)
{
const u64 res = _byteswap_uint64(src);
return (T&)res;
@@ -508,12 +510,12 @@ template<typename T> struct se_t<T, 8>
template<typename T> struct se_t<T, 16>
{
static __forceinline u128 to_be(const T& src)
static force_inline u128 to_be(const T& src)
{
return u128::byteswap((u128&)src);
}
static __forceinline T from_be(const u128& src)
static force_inline T from_be(const u128& src)
{
const u128 res = u128::byteswap(src);
return (T&)res;
@@ -603,7 +605,7 @@ private:
template<typename Tto, typename Tfrom, int mode>
struct _convert
{
static __forceinline be_t<Tto>& func(Tfrom& be_value)
static force_inline be_t<Tto>& func(Tfrom& be_value)
{
Tto res = be_value;
return (be_t<Tto>&)res;
@@ -613,7 +615,7 @@ private:
template<typename Tto, typename Tfrom>
struct _convert<Tto, Tfrom, 1>
{
static __forceinline be_t<Tto>& func(Tfrom& be_value)
static force_inline be_t<Tto>& func(Tfrom& be_value)
{
Tto res = se_t<Tto, sizeof(Tto)>::func(se_t<Tfrom, sizeof(Tfrom)>::func(be_value));
return (be_t<Tto>&)res;
@@ -623,7 +625,7 @@ private:
template<typename Tto, typename Tfrom>
struct _convert<Tto, Tfrom, 2>
{
static __forceinline be_t<Tto>& func(Tfrom& be_value)
static force_inline be_t<Tto>& func(Tfrom& be_value)
{
Tto res = be_value >> ((sizeof(Tfrom)-sizeof(Tto)) * 8);
return (be_t<Tto>&)res;
@@ -673,7 +675,7 @@ public:
}
//get value in current machine byte ordering
__forceinline type value() const
force_inline type value() const
{
#ifdef IS_LE_MACHINE
return ToLE();
@@ -906,13 +908,13 @@ struct convert_le_be_t<Tto, be_t<Tf, Tf1>>
};
template<typename Tto, typename Tfrom>
__forceinline Tto convert_le_be(Tfrom&& value)
force_inline Tto convert_le_be(Tfrom&& value)
{
return convert_le_be_t<Tto, Tfrom>::func(value);
}
template<typename Tto, typename Tfrom>
__forceinline void convert_le_be(Tto& dst, Tfrom&& src)
force_inline void convert_le_be(Tto& dst, Tfrom&& src)
{
dst = convert_le_be_t<Tto, Tfrom>::func(src);
}

View file

@@ -2,22 +2,34 @@
#include <emmintrin.h>
#ifdef _WIN32
#if defined(_MSC_VER)
#define thread_local __declspec(thread)
#elif __APPLE__
#define thread_local __thread
#endif
#ifdef _WIN32
#define __noinline __declspec(noinline)
#if defined(_MSC_VER)
#define never_inline __declspec(noinline)
#else
#define __noinline __attribute__((noinline))
#define never_inline __attribute__((noinline))
#endif
#ifdef _WIN32
#define __safebuffers __declspec(safebuffers)
#if defined(_MSC_VER)
#define safe_buffers __declspec(safebuffers)
#else
#define __safebuffers
#define safe_buffers
#endif
#if defined(_MSC_VER)
#define printf_alike(x, y)
#else
#define printf_alike(x, y) __attribute__((format(printf, x, y)))
#endif
#if defined(_MSC_VER)
#define force_inline __forceinline
#else
#define force_inline __attribute__((always_inline))
#endif
template<size_t size>
@@ -46,12 +58,10 @@ void strcpy_trunc(char(&dst)[size], const char(&src)[rsize])
#endif
#define _fpclass(x) std::fpclassify(x)
#define __forceinline __attribute__((always_inline))
#define _byteswap_ushort(x) __builtin_bswap16(x)
#define _byteswap_ulong(x) __builtin_bswap32(x)
#define _byteswap_uint64(x) __builtin_bswap64(x)
#define INFINITE 0xFFFFFFFF
#define _CRT_ALIGN(x) __attribute__((aligned(x)))
inline uint64_t __umulh(uint64_t a, uint64_t b)
{
@@ -124,181 +134,181 @@ template<typename T, typename T2> static inline typename std::enable_if<std::is_
// atomic compare and swap functions
static __forceinline uint8_t sync_val_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
static force_inline uint8_t sync_val_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
{
return _InterlockedCompareExchange8((volatile char*)dest, exch, comp);
}
static __forceinline uint16_t sync_val_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
static force_inline uint16_t sync_val_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
}
static __forceinline uint32_t sync_val_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
static force_inline uint32_t sync_val_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
}
static __forceinline uint64_t sync_val_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
static force_inline uint64_t sync_val_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
return _InterlockedCompareExchange64((volatile long long*)dest, exch, comp);
}
static __forceinline bool sync_bool_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
static force_inline bool sync_bool_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
{
return (uint8_t)_InterlockedCompareExchange8((volatile char*)dest, exch, comp) == comp;
}
static __forceinline bool sync_bool_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
static force_inline bool sync_bool_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
return (uint16_t)_InterlockedCompareExchange16((volatile short*)dest, exch, comp) == comp;
}
static __forceinline bool sync_bool_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
static force_inline bool sync_bool_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
return (uint32_t)_InterlockedCompareExchange((volatile long*)dest, exch, comp) == comp;
}
static __forceinline bool sync_bool_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
static force_inline bool sync_bool_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
return (uint64_t)_InterlockedCompareExchange64((volatile long long*)dest, exch, comp) == comp;
}
// atomic exchange functions
static __forceinline uint8_t sync_lock_test_and_set(volatile uint8_t* dest, uint8_t value)
static force_inline uint8_t sync_lock_test_and_set(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchange8((volatile char*)dest, value);
}
static __forceinline uint16_t sync_lock_test_and_set(volatile uint16_t* dest, uint16_t value)
static force_inline uint16_t sync_lock_test_and_set(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchange16((volatile short*)dest, value);
}
static __forceinline uint32_t sync_lock_test_and_set(volatile uint32_t* dest, uint32_t value)
static force_inline uint32_t sync_lock_test_and_set(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchange((volatile long*)dest, value);
}
static __forceinline uint64_t sync_lock_test_and_set(volatile uint64_t* dest, uint64_t value)
static force_inline uint64_t sync_lock_test_and_set(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchange64((volatile long long*)dest, value);
}
// atomic add functions
static __forceinline uint8_t sync_fetch_and_add(volatile uint8_t* dest, uint8_t value)
static force_inline uint8_t sync_fetch_and_add(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, value);
}
static __forceinline uint16_t sync_fetch_and_add(volatile uint16_t* dest, uint16_t value)
static force_inline uint16_t sync_fetch_and_add(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, value);
}
static __forceinline uint32_t sync_fetch_and_add(volatile uint32_t* dest, uint32_t value)
static force_inline uint32_t sync_fetch_and_add(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchangeAdd((volatile long*)dest, value);
}
static __forceinline uint64_t sync_fetch_and_add(volatile uint64_t* dest, uint64_t value)
static force_inline uint64_t sync_fetch_and_add(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, value);
}
// atomic sub functions
static __forceinline uint8_t sync_fetch_and_sub(volatile uint8_t* dest, uint8_t value)
static force_inline uint8_t sync_fetch_and_sub(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, -(char)value);
}
static __forceinline uint16_t sync_fetch_and_sub(volatile uint16_t* dest, uint16_t value)
static force_inline uint16_t sync_fetch_and_sub(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, -(short)value);
}
static __forceinline uint32_t sync_fetch_and_sub(volatile uint32_t* dest, uint32_t value)
static force_inline uint32_t sync_fetch_and_sub(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchangeAdd((volatile long*)dest, -(long)value);
}
static __forceinline uint64_t sync_fetch_and_sub(volatile uint64_t* dest, uint64_t value)
static force_inline uint64_t sync_fetch_and_sub(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, -(long long)value);
}
// atomic bitwise or functions
static __forceinline uint8_t sync_fetch_and_or(volatile uint8_t* dest, uint8_t value)
static force_inline uint8_t sync_fetch_and_or(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedOr8((volatile char*)dest, value);
}
static __forceinline uint16_t sync_fetch_and_or(volatile uint16_t* dest, uint16_t value)
static force_inline uint16_t sync_fetch_and_or(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedOr16((volatile short*)dest, value);
}
static __forceinline uint32_t sync_fetch_and_or(volatile uint32_t* dest, uint32_t value)
static force_inline uint32_t sync_fetch_and_or(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedOr((volatile long*)dest, value);
}
static __forceinline uint64_t sync_fetch_and_or(volatile uint64_t* dest, uint64_t value)
static force_inline uint64_t sync_fetch_and_or(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedOr64((volatile long long*)dest, value);
}
// atomic bitwise and functions
static __forceinline uint8_t sync_fetch_and_and(volatile uint8_t* dest, uint8_t value)
static force_inline uint8_t sync_fetch_and_and(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedAnd8((volatile char*)dest, value);
}
static __forceinline uint16_t sync_fetch_and_and(volatile uint16_t* dest, uint16_t value)
static force_inline uint16_t sync_fetch_and_and(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedAnd16((volatile short*)dest, value);
}
static __forceinline uint32_t sync_fetch_and_and(volatile uint32_t* dest, uint32_t value)
static force_inline uint32_t sync_fetch_and_and(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedAnd((volatile long*)dest, value);
}
static __forceinline uint64_t sync_fetch_and_and(volatile uint64_t* dest, uint64_t value)
static force_inline uint64_t sync_fetch_and_and(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedAnd64((volatile long long*)dest, value);
}
// atomic bitwise xor functions
static __forceinline uint8_t sync_fetch_and_xor(volatile uint8_t* dest, uint8_t value)
static force_inline uint8_t sync_fetch_and_xor(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedXor8((volatile char*)dest, value);
}
static __forceinline uint16_t sync_fetch_and_xor(volatile uint16_t* dest, uint16_t value)
static force_inline uint16_t sync_fetch_and_xor(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedXor16((volatile short*)dest, value);
}
static __forceinline uint32_t sync_fetch_and_xor(volatile uint32_t* dest, uint32_t value)
static force_inline uint32_t sync_fetch_and_xor(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedXor((volatile long*)dest, value);
}
static __forceinline uint64_t sync_fetch_and_xor(volatile uint64_t* dest, uint64_t value)
static force_inline uint64_t sync_fetch_and_xor(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedXor64((volatile long long*)dest, value);
}
#endif /* _MSC_VER */
static __forceinline uint32_t cntlz32(uint32_t arg)
static force_inline uint32_t cntlz32(uint32_t arg)
{
#if defined(_MSC_VER)
unsigned long res;
@@ -322,7 +332,7 @@ static __forceinline uint32_t cntlz32(uint32_t arg)
#endif
}
static __forceinline uint64_t cntlz64(uint64_t arg)
static force_inline uint64_t cntlz64(uint64_t arg)
{
#if defined(_MSC_VER)
unsigned long res;

View file

@@ -129,8 +129,7 @@ static struct { inline operator Log::LogType() { return Log::LogType::TTY; } } T
void log_message(Log::LogType type, Log::LogSeverity sev, const char* text);
void log_message(Log::LogType type, Log::LogSeverity sev, std::string text);
template<typename... Targs>
__noinline void log_message(Log::LogType type, Log::LogSeverity sev, const char* fmt, Targs... args)
template<typename... Args> never_inline void log_message(Log::LogType type, Log::LogSeverity sev, const char* fmt, Args... args) printf_alike(3, 4)
{
log_message(type, sev, fmt::Format(fmt, fmt::do_unveil(args)...));
}

View file

@@ -95,8 +95,7 @@ namespace fmt
T by_value(T x) { return x; }
//wrapper to deal with advance sprintf formating options with automatic length finding
template<typename ... Args>
std::string Format(const char* fmt, Args ... parameters)
template<typename... Args> std::string Format(const char* fmt, Args... parameters) printf_alike(1, 2)
{
size_t length = 256;
std::string str;
@@ -139,7 +138,7 @@ namespace fmt
if (src.substr(pos, comp_length) == list[i].first)
{
src = (pos ? src.substr(0, pos) + list[i].second : list[i].second) + std::string(src.c_str() + pos + comp_length);
src = (pos ? src.substr(0, pos) + list[i].second : list[i].second) + src.substr(pos + comp_length);
pos += list[i].second.length() - 1;
break;
}
@@ -163,7 +162,7 @@ namespace fmt
if (src.substr(pos, comp_length) == list[i].first)
{
src = (pos ? src.substr(0, pos) + list[i].second() : list[i].second()) + std::string(src.c_str() + pos + comp_length);
src = (pos ? src.substr(0, pos) + list[i].second() : list[i].second()) + src.substr(pos + comp_length);
pos += list[i].second().length() - 1;
break;
}
@@ -182,7 +181,7 @@ namespace fmt
{
typedef T result_type;
__forceinline static result_type get_value(const T& arg)
force_inline static result_type get_value(const T& arg)
{
return arg;
}
@@ -193,7 +192,7 @@ namespace fmt
{
typedef const char* result_type;
__forceinline static result_type get_value(const char* arg)
force_inline static result_type get_value(const char* arg)
{
return arg;
}
@@ -204,7 +203,7 @@ namespace fmt
{
typedef const char* result_type;
__forceinline static result_type get_value(const char(&arg)[N])
force_inline static result_type get_value(const char(&arg)[N])
{
return arg;
}
@@ -215,7 +214,7 @@ namespace fmt
{
typedef const char* result_type;
__forceinline static result_type get_value(const std::string& arg)
force_inline static result_type get_value(const std::string& arg)
{
return arg.c_str();
}
@@ -226,7 +225,7 @@ namespace fmt
{
typedef typename std::underlying_type<T>::type result_type;
__forceinline static result_type get_value(const T& arg)
force_inline static result_type get_value(const T& arg)
{
return static_cast<result_type>(arg);
}
@@ -237,14 +236,14 @@ namespace fmt
{
typedef typename unveil<T>::result_type result_type;
__forceinline static result_type get_value(const be_t<T, T2>& arg)
force_inline static result_type get_value(const be_t<T, T2>& arg)
{
return unveil<T>::get_value(arg.value());
}
};
template<typename T>
__forceinline typename unveil<T>::result_type do_unveil(const T& arg)
force_inline typename unveil<T>::result_type do_unveil(const T& arg)
{
return unveil<T>::get_value(arg);
}
@@ -266,8 +265,7 @@ namespace fmt
vm::psv::ref (fmt::unveil) (vm_ref.h)
*/
template<typename... Args>
__forceinline __safebuffers std::string format(const char* fmt, Args... args)
template<typename... Args> force_inline safe_buffers std::string format(const char* fmt, Args... args) printf_alike(1, 2)
{
return Format(fmt, do_unveil(args)...);
}

View file

@@ -118,7 +118,7 @@ struct waiter_map_t
bool is_stopped(u64 signal_id);
// wait until waiter_func() returns true, signal_id is an arbitrary number
template<typename S, typename WT> __forceinline __safebuffers void wait_op(const S& signal_id, const WT waiter_func)
template<typename S, typename WT> force_inline safe_buffers void wait_op(const S& signal_id, const WT waiter_func)
{
// generate hash
const auto hash = std::hash<S>()(signal_id) % size;
@@ -141,7 +141,7 @@ struct waiter_map_t
}
// signal all threads waiting on waiter_op() with the same signal_id (signaling only hints those threads that corresponding conditions are *probably* met)
template<typename S> __forceinline void notify(const S& signal_id)
template<typename S> force_inline void notify(const S& signal_id)
{
// generate hash
const auto hash = std::hash<S>()(signal_id) % size;
@@ -258,12 +258,12 @@ public:
return push(data, [do_exit](){ return do_exit && *do_exit; });
}
__forceinline bool push(const T& data)
force_inline bool push(const T& data)
{
return push(data, SQUEUE_NEVER_EXIT);
}
__forceinline bool try_push(const T& data)
force_inline bool try_push(const T& data)
{
return push(data, SQUEUE_ALWAYS_EXIT);
}
@@ -326,12 +326,12 @@ public:
return pop(data, [do_exit](){ return do_exit && *do_exit; });
}
__forceinline bool pop(T& data)
force_inline bool pop(T& data)
{
return pop(data, SQUEUE_NEVER_EXIT);
}
__forceinline bool try_pop(T& data)
force_inline bool try_pop(T& data)
{
return pop(data, SQUEUE_ALWAYS_EXIT);
}
@@ -388,12 +388,12 @@ public:
return peek(data, start_pos, [do_exit](){ return do_exit && *do_exit; });
}
__forceinline bool peek(T& data, u32 start_pos = 0)
force_inline bool peek(T& data, u32 start_pos = 0)
{
return peek(data, start_pos, SQUEUE_NEVER_EXIT);
}
__forceinline bool try_peek(T& data, u32 start_pos = 0)
force_inline bool try_peek(T& data, u32 start_pos = 0)
{
return peek(data, start_pos, SQUEUE_ALWAYS_EXIT);
}