/* mum.h */
/* Copyright (c) 2016 Vladimir Makarov <vmakarov@gcc.gnu.org>

   Permission is hereby granted, free of charge, to any person
   obtaining a copy of this software and associated documentation
   files (the "Software"), to deal in the Software without
   restriction, including without limitation the rights to use, copy,
   modify, merge, publish, distribute, sublicense, and/or sell copies
   of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be
   included in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   SOFTWARE.  */
/* This file implements MUM (MUltiply and Mix) hashing.  We randomize
   input data by 64x64-bit multiplication and mixing hi- and low-parts
   of the multiplication result by using an addition and then mix it
   into the current state.  We use prime numbers randomly generated
   with the equal probability of their bit values for the
   multiplication.  When all primes are used once, the state is
   randomized and the same prime numbers are used again for data
   randomization.

   The MUM hashing passes all SMHasher tests.  A Pseudo Random Number
   Generator based on MUM also passes the NIST Statistical Test Suite
   for Random and Pseudorandom Number Generators for Cryptographic
   Applications (version 2.2.1) with 1000 bitstreams each containing
   1M bits.  MUM hashing is also faster than Spooky64 and City64 on
   small strings (at least up to 512 bits) on Haswell and Power7.  The
   MUM bulk speed (speed on very long data) is bigger than Spooky and
   City on Power7.  On Haswell the bulk speed is bigger than the Spooky
   one and close to the City speed.  */
  37. #ifndef __MUM_HASH__
  38. #define __MUM_HASH__
  39. #include <stddef.h>
  40. #include <stdlib.h>
  41. #include <string.h>
  42. #include <limits.h>
  43. #ifdef _MSC_VER
  44. typedef unsigned __int16 uint16_t;
  45. typedef unsigned __int32 uint32_t;
  46. typedef unsigned __int64 uint64_t;
  47. #else
  48. #include <stdint.h>
  49. #endif
  50. #ifdef __GNUC__
  51. #define _MUM_ATTRIBUTE_UNUSED __attribute__((unused))
  52. # ifdef __clang__
  53. # define _MUM_OPTIMIZE(opts)
  54. # else
  55. # define _MUM_OPTIMIZE(opts) __attribute__((__optimize__ (opts)))
  56. # endif
  57. #define _MUM_TARGET(opts) __attribute__((__target__ (opts)))
  58. #else
  59. #define _MUM_ATTRIBUTE_UNUSED
  60. #define _MUM_OPTIMIZE(opts)
  61. #define _MUM_TARGET(opts)
  62. #endif
  63. /* Macro saying to use 128-bit integers implemented by GCC for some
  64. targets. */
  65. #ifndef _MUM_USE_INT128
  66. /* In GCC uint128_t is defined if HOST_BITS_PER_WIDE_INT >= 64.
  67. HOST_WIDE_INT is long if HOST_BITS_PER_LONG > HOST_BITS_PER_INT,
  68. otherwise int. */
  69. #if defined(__GNUC__) && UINT_MAX != ULONG_MAX
  70. #define _MUM_USE_INT128 1
  71. #else
  72. #define _MUM_USE_INT128 0
  73. #endif
  74. #endif
  75. #if defined(__GNUC__) && ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 9) || (__GNUC__ > 4))
  76. #define _MUM_FRESH_GCC
  77. #endif
  78. /* Here are different primes randomly generated with the equal
  79. probability of their bit values. They are used to randomize input
  80. values. */
  81. static uint64_t _mum_hash_step_prime = 0x2e0bb864e9ea7df5ULL;
  82. static uint64_t _mum_key_step_prime = 0xcdb32970830fcaa1ULL;
  83. static uint64_t _mum_block_start_prime = 0xc42b5e2e6480b23bULL;
  84. static uint64_t _mum_unroll_prime = 0x7b51ec3d22f7096fULL;
  85. static uint64_t _mum_tail_prime = 0xaf47d47c99b1461bULL;
  86. static uint64_t _mum_finish_prime1 = 0xa9a7ae7ceff79f3fULL;
  87. static uint64_t _mum_finish_prime2 = 0xaf47d47c99b1461bULL;
  88. static uint64_t _mum_primes [] = {
  89. 0X9ebdcae10d981691, 0X32b9b9b97a27ac7d, 0X29b5584d83d35bbd, 0X4b04e0e61401255f,
  90. 0X25e8f7b1f1c9d027, 0X80d4c8c000f3e881, 0Xbd1255431904b9dd, 0X8a3bd4485eee6d81,
  91. 0X3bc721b2aad05197, 0X71b1a19b907d6e33, 0X525e6c1084a8534b, 0X9e4c2cd340c1299f,
  92. 0Xde3add92e94caa37, 0X7e14eadb1f65311d, 0X3f5aa40f89812853, 0X33b15a3b587d15c9,
  93. };
  94. /* Multiply 64-bit V and P and return sum of high and low parts of the
  95. result. */
  96. static inline uint64_t
  97. _mum (uint64_t v, uint64_t p) {
  98. uint64_t hi, lo;
  99. #if _MUM_USE_INT128
  100. #if defined(__aarch64__)
  101. /* AARCH64 needs 2 insns to calculate 128-bit result of the
  102. multiplication. If we use a generic code we actually call a
  103. function doing 128x128->128 bit multiplication. The function is
  104. very slow. */
  105. lo = v * p, hi;
  106. asm ("umulh %0, %1, %2" : "=r" (hi) : "r" (v), "r" (p));
  107. #else
  108. __uint128_t r = (__uint128_t) v * (__uint128_t) p;
  109. hi = (uint64_t) (r >> 64);
  110. lo = (uint64_t) r;
  111. #endif
  112. #else
  113. /* Implementation of 64x64->128-bit multiplication by four 32x32->64
  114. bit multiplication. */
  115. uint64_t hv = v >> 32, hp = p >> 32;
  116. uint64_t lv = (uint32_t) v, lp = (uint32_t) p;
  117. uint64_t rh = hv * hp;
  118. uint64_t rm_0 = hv * lp;
  119. uint64_t rm_1 = hp * lv;
  120. uint64_t rl = lv * lp;
  121. uint64_t t, carry = 0;
  122. /* We could ignore a carry bit here if we did not care about the
  123. same hash for 32-bit and 64-bit targets. */
  124. t = rl + (rm_0 << 32);
  125. #ifdef MUM_TARGET_INDEPENDENT_HASH
  126. carry = t < rl;
  127. #endif
  128. lo = t + (rm_1 << 32);
  129. #ifdef MUM_TARGET_INDEPENDENT_HASH
  130. carry += lo < t;
  131. #endif
  132. hi = rh + (rm_0 >> 32) + (rm_1 >> 32) + carry;
  133. #endif
  134. /* We could use XOR here too but, for some reasons, on Haswell and
  135. Power7 using an addition improves hashing performance by 10% for
  136. small strings. */
  137. return hi + lo;
  138. }
  139. #if defined(_MSC_VER)
  140. #define _mum_bswap_32(x) _byteswap_uint32_t (x)
  141. #define _mum_bswap_64(x) _byteswap_uint64_t (x)
  142. #elif defined(__APPLE__)
  143. #include <libkern/OSByteOrder.h>
  144. #define _mum_bswap_32(x) OSSwapInt32 (x)
  145. #define _mum_bswap_64(x) OSSwapInt64 (x)
  146. #elif defined(__GNUC__)
  147. #define _mum_bswap32(x) __builtin_bswap32 (x)
  148. #define _mum_bswap64(x) __builtin_bswap64 (x)
  149. #else
  150. #include <byteswap.h>
  151. #define _mum_bswap32(x) bswap32 (x)
  152. #define _mum_bswap64(x) bswap64 (x)
  153. #endif
  154. static inline uint64_t
  155. _mum_le (uint64_t v) {
  156. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || !defined(MUM_TARGET_INDEPENDENT_HASH)
  157. return v;
  158. #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  159. return _mum_bswap64 (v);
  160. #else
  161. #error "Unknown endianess"
  162. #endif
  163. }
  164. static inline uint32_t
  165. _mum_le32 (uint32_t v) {
  166. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || !defined(MUM_TARGET_INDEPENDENT_HASH)
  167. return v;
  168. #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  169. return _mum_bswap32 (v);
  170. #else
  171. #error "Unknown endianess"
  172. #endif
  173. }
  174. /* Macro defining how many times the most nested loop in
  175. _mum_hash_aligned will be unrolled by the compiler (although it can
  176. make an own decision:). Use only a constant here to help a
  177. compiler to unroll a major loop.
  178. The macro value affects the result hash for strings > 128 bit. The
  179. unroll factor greatly affects the hashing speed. We prefer the
  180. speed. */
  181. #ifndef _MUM_UNROLL_FACTOR_POWER
  182. #if defined(__PPC64__) && !defined(MUM_TARGET_INDEPENDENT_HASH)
  183. #define _MUM_UNROLL_FACTOR_POWER 3
  184. #elif defined(__aarch64__) && !defined(MUM_TARGET_INDEPENDENT_HASH)
  185. #define _MUM_UNROLL_FACTOR_POWER 4
  186. #else
  187. #define _MUM_UNROLL_FACTOR_POWER 2
  188. #endif
  189. #endif
  190. #if _MUM_UNROLL_FACTOR_POWER < 1
  191. #error "too small unroll factor"
  192. #elif _MUM_UNROLL_FACTOR_POWER > 4
  193. #error "We have not enough primes for such unroll factor"
  194. #endif
  195. #define _MUM_UNROLL_FACTOR (1 << _MUM_UNROLL_FACTOR_POWER)
  196. static inline uint64_t _MUM_OPTIMIZE("unroll-loops")
  197. _mum_hash_aligned (uint64_t start, const void *key, size_t len) {
  198. uint64_t result = start;
  199. const unsigned char *str = (const unsigned char *) key;
  200. uint64_t u64;
  201. size_t i;
  202. size_t n;
  203. result = _mum (result, _mum_block_start_prime);
  204. while (len > _MUM_UNROLL_FACTOR * sizeof (uint64_t)) {
  205. /* This loop could be vectorized when we have vector insns for
  206. 64x64->128-bit multiplication. AVX2 currently only have a
  207. vector insn for 4 32x32->64-bit multiplication. */
  208. for (i = 0; i < _MUM_UNROLL_FACTOR; i++)
  209. result ^= _mum (_mum_le (((uint64_t *) str)[i]), _mum_primes[i]);
  210. len -= _MUM_UNROLL_FACTOR * sizeof (uint64_t);
  211. str += _MUM_UNROLL_FACTOR * sizeof (uint64_t);
  212. /* We will use the same prime numbers on the next iterations --
  213. randomize the state. */
  214. result = _mum (result, _mum_unroll_prime);
  215. }
  216. n = len / sizeof (uint64_t);
  217. for (i = 0; i < n; i++)
  218. result ^= _mum (_mum_le (((uint64_t *) str)[i]), _mum_primes[i]);
  219. len -= n * sizeof (uint64_t); str += n * sizeof (uint64_t);
  220. switch (len) {
  221. case 7:
  222. u64 = _mum_le32 (*(uint32_t *) str);
  223. u64 |= (uint64_t) str[4] << 32;
  224. u64 |= (uint64_t) str[5] << 40;
  225. u64 |= (uint64_t) str[6] << 48;
  226. return result ^ _mum (u64, _mum_tail_prime);
  227. case 6:
  228. u64 = _mum_le32 (*(uint32_t *) str);
  229. u64 |= (uint64_t) str[4] << 32;
  230. u64 |= (uint64_t) str[5] << 40;
  231. return result ^ _mum (u64, _mum_tail_prime);
  232. case 5:
  233. u64 = _mum_le32 (*(uint32_t *) str);
  234. u64 |= (uint64_t) str[4] << 32;
  235. return result ^ _mum (u64, _mum_tail_prime);
  236. case 4:
  237. u64 = _mum_le32 (*(uint32_t *) str);
  238. return result ^ _mum (u64, _mum_tail_prime);
  239. case 3:
  240. u64 = str[0];
  241. u64 |= (uint64_t) str[1] << 8;
  242. u64 |= (uint64_t) str[2] << 16;
  243. return result ^ _mum (u64, _mum_tail_prime);
  244. case 2:
  245. u64 = str[0];
  246. u64 |= (uint64_t) str[1] << 8;
  247. return result ^ _mum (u64, _mum_tail_prime);
  248. case 1:
  249. u64 = str[0];
  250. return result ^ _mum (u64, _mum_tail_prime);
  251. }
  252. return result;
  253. }
  254. /* Final randomization of H. */
  255. static inline uint64_t
  256. _mum_final (uint64_t h) {
  257. h ^= _mum (h, _mum_finish_prime1);
  258. h ^= _mum (h, _mum_finish_prime2);
  259. return h;
  260. }
  261. #if defined(__x86_64__) && defined(_MUM_FRESH_GCC)
  262. /* We want to use AVX2 insn MULX instead of generic x86-64 MULQ where
  263. it is possible. Although on modern Intel processors MULQ takes
  264. 3-cycles vs. 4 for MULX, MULX permits more freedom in insn
  265. scheduling as it uses less fixed registers. */
  266. static inline uint64_t _MUM_TARGET("arch=haswell")
  267. _mum_hash_avx2 (const void * key, size_t len, uint64_t seed) {
  268. return _mum_final (_mum_hash_aligned (seed + len, key, len));
  269. }
  270. #endif
  271. #ifndef _MUM_UNALIGNED_ACCESS
  272. #if defined(__x86_64__) || defined(__i386__) || defined(__PPC64__) \
  273. || defined(__s390__) || defined(__m32c__) || defined(cris) \
  274. || defined(__CR16__) || defined(__vax__) || defined(__m68k__) \
  275. || defined(__aarch64__) || defined(_M_AMD64) || defined(_M_IX86)
  276. #define _MUM_UNALIGNED_ACCESS 1
  277. #else
  278. #define _MUM_UNALIGNED_ACCESS 0
  279. #endif
  280. #endif
  281. /* When we need an aligned access to data being hashed we move part of
  282. the unaligned data to an aligned block of given size and then
  283. process it, repeating processing the data by the block. */
  284. #ifndef _MUM_BLOCK_LEN
  285. #define _MUM_BLOCK_LEN 1024
  286. #endif
  287. #if _MUM_BLOCK_LEN < 8
  288. #error "too small block length"
  289. #endif
  290. static inline uint64_t
  291. #if defined(__x86_64__)
  292. _MUM_TARGET("inline-all-stringops")
  293. #endif
  294. _mum_hash_default (const void *key, size_t len, uint64_t seed) {
  295. uint64_t result;
  296. const unsigned char *str = (const unsigned char *) key;
  297. size_t block_len;
  298. uint64_t buf[_MUM_BLOCK_LEN / sizeof (uint64_t)];
  299. result = seed + len;
  300. if (_MUM_UNALIGNED_ACCESS || ((size_t) str & 0x7) == 0)
  301. result = _mum_hash_aligned (result, key, len);
  302. else {
  303. while (len != 0) {
  304. block_len = len < _MUM_BLOCK_LEN ? len : _MUM_BLOCK_LEN;
  305. memmove (buf, str, block_len);
  306. result = _mum_hash_aligned (result, buf, block_len);
  307. len -= block_len;
  308. str += block_len;
  309. }
  310. }
  311. return _mum_final (result);
  312. }
  313. static inline uint64_t
  314. _mum_next_factor (void) {
  315. uint64_t start = 0;
  316. int i;
  317. for (i = 0; i < 8; i++)
  318. start = (start << 8) | rand() % 256;
  319. return start;
  320. }
  321. /* ++++++++++++++++++++++++++ Interface functions: +++++++++++++++++++ */
  322. /* Set random multiplicators depending on SEED. */
  323. static inline void
  324. mum_hash_randomize (uint64_t seed) {
  325. size_t i;
  326. srand (seed);
  327. _mum_hash_step_prime = _mum_next_factor ();
  328. _mum_key_step_prime = _mum_next_factor ();
  329. _mum_finish_prime1 = _mum_next_factor ();
  330. _mum_finish_prime2 = _mum_next_factor ();
  331. _mum_block_start_prime = _mum_next_factor ();
  332. _mum_unroll_prime = _mum_next_factor ();
  333. _mum_tail_prime = _mum_next_factor ();
  334. for (i = 0; i < sizeof (_mum_primes) / sizeof (uint64_t); i++)
  335. _mum_primes[i] = _mum_next_factor ();
  336. }
  337. /* Start hashing data with SEED. Return the state. */
  338. static inline uint64_t
  339. mum_hash_init (uint64_t seed) {
  340. return seed;
  341. }
  342. /* Process data KEY with the state H and return the updated state. */
  343. static inline uint64_t
  344. mum_hash_step (uint64_t h, uint64_t key)
  345. {
  346. return _mum (h, _mum_hash_step_prime) ^ _mum (key, _mum_key_step_prime);
  347. }
  348. /* Return the result of hashing using the current state H. */
  349. static inline uint64_t
  350. mum_hash_finish (uint64_t h) {
  351. return _mum_final (h);
  352. }
  353. /* Fast hashing of KEY with SEED. The hash is always the same for the
  354. same key on any target. */
  355. static inline size_t
  356. mum_hash64 (uint64_t key, uint64_t seed) {
  357. return mum_hash_finish (mum_hash_step (mum_hash_init (seed), key));
  358. }
  359. /* Hash data KEY of length LEN and SEED. The hash depends on the
  360. target endianess and the unroll factor. */
  361. static inline uint64_t
  362. mum_hash (const void *key, size_t len, uint64_t seed) {
  363. #if defined(__x86_64__) && defined(_MUM_FRESH_GCC)
  364. static int avx2_support = 0;
  365. if (avx2_support > 0)
  366. return _mum_hash_avx2 (key, len, seed);
  367. else if (! avx2_support) {
  368. __builtin_cpu_init ();
  369. avx2_support = __builtin_cpu_supports ("avx2") ? 1 : -1;
  370. if (avx2_support > 0)
  371. return _mum_hash_avx2 (key, len, seed);
  372. }
  373. #endif
  374. return _mum_hash_default (key, len, seed);
  375. }
  376. #endif