🛠️🐜 Antkeeper superbuild with dependencies included https://antkeeper.com
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

317 lines
9.1 KiB

#ifndef AL_NUMERIC_H
#define AL_NUMERIC_H

#include <cstddef>
#include <cstring>
#include <stdint.h>

#ifdef HAVE_INTRIN_H
#include <intrin.h>
#endif
#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#endif

#include "opthelpers.h"
  11. inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); }
  12. inline constexpr uint64_t operator "" _u64(unsigned long long int n) noexcept { return static_cast<uint64_t>(n); }
  13. constexpr inline float minf(float a, float b) noexcept
  14. { return ((a > b) ? b : a); }
  15. constexpr inline float maxf(float a, float b) noexcept
  16. { return ((a > b) ? a : b); }
  17. constexpr inline float clampf(float val, float min, float max) noexcept
  18. { return minf(max, maxf(min, val)); }
  19. constexpr inline double mind(double a, double b) noexcept
  20. { return ((a > b) ? b : a); }
  21. constexpr inline double maxd(double a, double b) noexcept
  22. { return ((a > b) ? a : b); }
  23. constexpr inline double clampd(double val, double min, double max) noexcept
  24. { return mind(max, maxd(min, val)); }
  25. constexpr inline unsigned int minu(unsigned int a, unsigned int b) noexcept
  26. { return ((a > b) ? b : a); }
  27. constexpr inline unsigned int maxu(unsigned int a, unsigned int b) noexcept
  28. { return ((a > b) ? a : b); }
  29. constexpr inline unsigned int clampu(unsigned int val, unsigned int min, unsigned int max) noexcept
  30. { return minu(max, maxu(min, val)); }
  31. constexpr inline int mini(int a, int b) noexcept
  32. { return ((a > b) ? b : a); }
  33. constexpr inline int maxi(int a, int b) noexcept
  34. { return ((a > b) ? a : b); }
  35. constexpr inline int clampi(int val, int min, int max) noexcept
  36. { return mini(max, maxi(min, val)); }
  37. constexpr inline int64_t mini64(int64_t a, int64_t b) noexcept
  38. { return ((a > b) ? b : a); }
  39. constexpr inline int64_t maxi64(int64_t a, int64_t b) noexcept
  40. { return ((a > b) ? a : b); }
  41. constexpr inline int64_t clampi64(int64_t val, int64_t min, int64_t max) noexcept
  42. { return mini64(max, maxi64(min, val)); }
  43. constexpr inline uint64_t minu64(uint64_t a, uint64_t b) noexcept
  44. { return ((a > b) ? b : a); }
  45. constexpr inline uint64_t maxu64(uint64_t a, uint64_t b) noexcept
  46. { return ((a > b) ? a : b); }
  47. constexpr inline uint64_t clampu64(uint64_t val, uint64_t min, uint64_t max) noexcept
  48. { return minu64(max, maxu64(min, val)); }
  49. constexpr inline size_t minz(size_t a, size_t b) noexcept
  50. { return ((a > b) ? b : a); }
  51. constexpr inline size_t maxz(size_t a, size_t b) noexcept
  52. { return ((a > b) ? a : b); }
  53. constexpr inline size_t clampz(size_t val, size_t min, size_t max) noexcept
  54. { return minz(max, maxz(min, val)); }
  55. /** Find the next power-of-2 for non-power-of-2 numbers. */
  56. inline uint32_t NextPowerOf2(uint32_t value) noexcept
  57. {
  58. if(value > 0)
  59. {
  60. value--;
  61. value |= value>>1;
  62. value |= value>>2;
  63. value |= value>>4;
  64. value |= value>>8;
  65. value |= value>>16;
  66. }
  67. return value+1;
  68. }
  69. /** Round up a value to the next multiple. */
  70. inline size_t RoundUp(size_t value, size_t r) noexcept
  71. {
  72. value += r-1;
  73. return value - (value%r);
  74. }
  75. /* Define CTZ macros (count trailing zeros), and POPCNT macros (population
  76. * count/count 1 bits), for 32- and 64-bit integers. The CTZ macros' results
  77. * are *UNDEFINED* if the value is 0.
  78. */
  79. #ifdef __GNUC__
  80. #define POPCNT32 __builtin_popcount
  81. #define CTZ32 __builtin_ctz
  82. #if SIZEOF_LONG == 8
  83. #define POPCNT64 __builtin_popcountl
  84. #define CTZ64 __builtin_ctzl
  85. #else
  86. #define POPCNT64 __builtin_popcountll
  87. #define CTZ64 __builtin_ctzll
  88. #endif
  89. #elif defined(HAVE_BITSCANFORWARD64_INTRINSIC)
  90. inline int msvc64_popcnt32(uint32_t v)
  91. { return (int)__popcnt(v); }
  92. #define POPCNT32 msvc64_popcnt32
  93. inline int msvc64_ctz32(uint32_t v)
  94. {
  95. unsigned long idx = 32;
  96. _BitScanForward(&idx, v);
  97. return (int)idx;
  98. }
  99. #define CTZ32 msvc64_ctz32
  100. inline int msvc64_popcnt64(uint64_t v)
  101. { return (int)__popcnt64(v); }
  102. #define POPCNT64 msvc64_popcnt64
  103. inline int msvc64_ctz64(uint64_t v)
  104. {
  105. unsigned long idx = 64;
  106. _BitScanForward64(&idx, v);
  107. return (int)idx;
  108. }
  109. #define CTZ64 msvc64_ctz64
  110. #elif defined(HAVE_BITSCANFORWARD_INTRINSIC)
  111. inline int msvc_popcnt32(uint32_t v)
  112. { return (int)__popcnt(v); }
  113. #define POPCNT32 msvc_popcnt32
  114. inline int msvc_ctz32(uint32_t v)
  115. {
  116. unsigned long idx = 32;
  117. _BitScanForward(&idx, v);
  118. return (int)idx;
  119. }
  120. #define CTZ32 msvc_ctz32
  121. inline int msvc_popcnt64(uint64_t v)
  122. { return (int)(__popcnt((uint32_t)v) + __popcnt((uint32_t)(v>>32))); }
  123. #define POPCNT64 msvc_popcnt64
  124. inline int msvc_ctz64(uint64_t v)
  125. {
  126. unsigned long idx = 64;
  127. if(!_BitScanForward(&idx, (uint32_t)(v&0xffffffff)))
  128. {
  129. if(_BitScanForward(&idx, (uint32_t)(v>>32)))
  130. idx += 32;
  131. }
  132. return (int)idx;
  133. }
  134. #define CTZ64 msvc_ctz64
  135. #else
  136. /* There be black magics here. The popcnt method is derived from
  137. * https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  138. * while the ctz-utilizing-popcnt algorithm is shown here
  139. * http://www.hackersdelight.org/hdcodetxt/ntz.c.txt
  140. * as the ntz2 variant. These likely aren't the most efficient methods, but
  141. * they're good enough if the GCC or MSVC intrinsics aren't available.
  142. */
  143. inline int fallback_popcnt32(uint32_t v)
  144. {
  145. v = v - ((v >> 1) & 0x55555555u);
  146. v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
  147. v = (v + (v >> 4)) & 0x0f0f0f0fu;
  148. return (int)((v * 0x01010101u) >> 24);
  149. }
  150. #define POPCNT32 fallback_popcnt32
  151. inline int fallback_ctz32(uint32_t value)
  152. { return fallback_popcnt32(~value & (value - 1)); }
  153. #define CTZ32 fallback_ctz32
  154. inline int fallback_popcnt64(uint64_t v)
  155. {
  156. v = v - ((v >> 1) & 0x5555555555555555_u64);
  157. v = (v & 0x3333333333333333_u64) + ((v >> 2) & 0x3333333333333333_u64);
  158. v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f_u64;
  159. return (int)((v * 0x0101010101010101_u64) >> 56);
  160. }
  161. #define POPCNT64 fallback_popcnt64
  162. inline int fallback_ctz64(uint64_t value)
  163. { return fallback_popcnt64(~value & (value - 1)); }
  164. #define CTZ64 fallback_ctz64
  165. #endif
  166. /**
  167. * Fast float-to-int conversion. No particular rounding mode is assumed; the
  168. * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
  169. * change it on its own threads. On some systems, a truncating conversion may
  170. * always be the fastest method.
  171. */
  172. inline int fastf2i(float f) noexcept
  173. {
  174. #if defined(HAVE_SSE_INTRINSICS)
  175. return _mm_cvt_ss2si(_mm_set_ss(f));
  176. #elif defined(_MSC_VER) && defined(_M_IX86_FP)
  177. int i;
  178. __asm fld f
  179. __asm fistp i
  180. return i;
  181. #elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))
  182. int i;
  183. #ifdef __SSE_MATH__
  184. __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
  185. #else
  186. __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
  187. #endif
  188. return i;
  189. #else
  190. return static_cast<int>(f);
  191. #endif
  192. }
  193. /** Converts float-to-int using standard behavior (truncation). */
  194. inline int float2int(float f) noexcept
  195. {
  196. #if defined(HAVE_SSE_INTRINSICS)
  197. return _mm_cvtt_ss2si(_mm_set_ss(f));
  198. #elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
  199. !defined(__SSE_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0)
  200. int sign, shift, mant;
  201. union {
  202. float f;
  203. int i;
  204. } conv;
  205. conv.f = f;
  206. sign = (conv.i>>31) | 1;
  207. shift = ((conv.i>>23)&0xff) - (127+23);
  208. /* Over/underflow */
  209. if(UNLIKELY(shift >= 31 || shift < -23))
  210. return 0;
  211. mant = (conv.i&0x7fffff) | 0x800000;
  212. if(LIKELY(shift < 0))
  213. return (mant >> -shift) * sign;
  214. return (mant << shift) * sign;
  215. #else
  216. return static_cast<int>(f);
  217. #endif
  218. }
  219. /**
  220. * Rounds a float to the nearest integral value, according to the current
  221. * rounding mode. This is essentially an inlined version of rintf, although
  222. * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
  223. */
  224. inline float fast_roundf(float f) noexcept
  225. {
  226. #if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
  227. !defined(__SSE_MATH__)
  228. float out;
  229. __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
  230. return out;
  231. #else
  232. /* Integral limit, where sub-integral precision is not available for
  233. * floats.
  234. */
  235. static constexpr float ilim[2] = {
  236. 8388608.0f /* 0x1.0p+23 */,
  237. -8388608.0f /* -0x1.0p+23 */
  238. };
  239. unsigned int sign, expo;
  240. union {
  241. float f;
  242. unsigned int i;
  243. } conv;
  244. conv.f = f;
  245. sign = (conv.i>>31)&0x01;
  246. expo = (conv.i>>23)&0xff;
  247. if(UNLIKELY(expo >= 150/*+23*/))
  248. {
  249. /* An exponent (base-2) of 23 or higher is incapable of sub-integral
  250. * precision, so it's already an integral value. We don't need to worry
  251. * about infinity or NaN here.
  252. */
  253. return f;
  254. }
  255. /* Adding the integral limit to the value (with a matching sign) forces a
  256. * result that has no sub-integral precision, and is consequently forced to
  257. * round to an integral value. Removing the integral limit then restores
  258. * the initial value rounded to the integral. The compiler should not
  259. * optimize this out because of non-associative rules on floating-point
  260. * math (as long as you don't use -fassociative-math,
  261. * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case this
  262. * may break).
  263. */
  264. f += ilim[sign];
  265. return f - ilim[sign];
  266. #endif
  267. }
  268. #endif /* AL_NUMERIC_H */