crc32_sse42.c

/*
 * Derived from crc32c.c version 1.1 by Mark Adler.
 *
 * Copyright (C) 2013 Mark Adler
 *
 * This software is provided 'as-is', without any express or implied warranty.
 * In no event will the author be held liable for any damages arising from the
 * use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 *
 * Mark Adler
 * madler@alumni.caltech.edu
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * This file is compiled in userspace in order to run ATF unit tests.
 */
#ifndef _KERNEL
#include <stdint.h>
#include <stdlib.h>
#else
#include <sys/param.h>
#include <sys/kernel.h>
#endif

#include <sys/gsb_crc32.h>

static __inline uint32_t
_mm_crc32_u8(uint32_t x, uint8_t y)
{
        /*
         * clang (at least 3.9.[0-1]) pessimizes "rm" (y) and "m" (y)
         * significantly and "r" (y) a lot by copying y to a different
         * local variable (on the stack or in a register), so only use
         * the latter.  This costs a register and an instruction but
         * not a uop.
         */
        __asm("crc32b %1,%0" : "+r" (x) : "r" (y));
        return (x);
}

#ifdef __amd64__
static __inline uint64_t
_mm_crc32_u64(uint64_t x, uint64_t y)
{
        __asm("crc32q %1,%0" : "+r" (x) : "r" (y));
        return (x);
}
#else
static __inline uint32_t
_mm_crc32_u32(uint32_t x, uint32_t y)
{
        __asm("crc32l %1,%0" : "+r" (x) : "r" (y));
        return (x);
}
#endif
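
/*
 * The helpers above mirror the SSE4.2 intrinsics of the same names from
 * <nmmintrin.h>.  Defining them locally, presumably, avoids depending on
 * the intrinsics header in the kernel and, per the comment in
 * _mm_crc32_u8(), keeps control over the generated code.
 */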

/* CRC-32C (iSCSI) polynomial in reversed bit order. */
#define POLY    0x82f63b78

/*
 * Block sizes for three-way parallel crc computation.  LONG and SHORT
 * must both be powers of two.
 */
#define LONG    128
#define SHORT   64
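
/*
 * With these values, each iteration of the LONG loop below consumes
 * 3 * LONG = 384 bytes and each iteration of the SHORT loop 3 * SHORT =
 * 192 bytes; shorter inputs fall through to the word-at-a-time tail loop.
 */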

/*
 * Tables for updating a crc for LONG, 2 * LONG, SHORT and 2 * SHORT bytes
 * of value 0 later in the input stream, in the same way that the hardware
 * would, but in software without calculating intermediate steps.
 */
static uint32_t crc32c_long[4][256];
static uint32_t crc32c_2long[4][256];
static uint32_t crc32c_short[4][256];
static uint32_t crc32c_2short[4][256];

/*
 * Multiply a matrix times a vector over the Galois field of two elements,
 * GF(2).  Each element is a bit in an unsigned integer.  mat must have at
 * least as many entries as the power of two for the most significant one
 * bit in vec.
 */
static inline uint32_t
gf2_matrix_times(uint32_t *mat, uint32_t vec)
{
        uint32_t sum;

        sum = 0;
        while (vec) {
                if (vec & 1)
                        sum ^= *mat;
                vec >>= 1;
                mat++;
        }
        return (sum);
}
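
/*
 * For example, gf2_matrix_times(mat, 0x5) returns mat[0] ^ mat[2]: each
 * set bit n of vec selects row mat[n], and addition in GF(2) is XOR.
 */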

/*
 * Multiply a matrix by itself over GF(2).  Both mat and square must have
 * 32 rows.
 */
static inline void
gf2_matrix_square(uint32_t *square, uint32_t *mat)
{
        int n;

        for (n = 0; n < 32; n++)
                square[n] = gf2_matrix_times(mat, mat[n]);
}

/*
 * Construct an operator to apply len zeros to a crc.  len must be a power
 * of two.  If len is not a power of two, then the result is the same as
 * for the largest power of two less than len.  The result for len == 0 is
 * the same as for len == 1.  A version of this routine could be easily
 * written for any len, but that is not needed for this application.
 */
static void
crc32c_zeros_op(uint32_t *even, size_t len)
{
        uint32_t odd[32];       /* odd-power-of-two zeros operator */
        uint32_t row;
        int n;

        /* put operator for one zero bit in odd */
        odd[0] = POLY;          /* CRC-32C polynomial */
        row = 1;
        for (n = 1; n < 32; n++) {
                odd[n] = row;
                row <<= 1;
        }

        /* put operator for two zero bits in even */
        gf2_matrix_square(even, odd);

        /* put operator for four zero bits in odd */
        gf2_matrix_square(odd, even);

        /*
         * first square will put the operator for one zero byte (eight zero
         * bits) in even -- next square puts operator for two zero bytes in
         * odd, and so on, until len has been rotated down to zero
         */
        do {
                gf2_matrix_square(even, odd);
                len >>= 1;
                if (len == 0)
                        return;
                gf2_matrix_square(odd, even);
                len >>= 1;
        } while (len);

        /* answer ended up in odd -- copy to even */
        for (n = 0; n < 32; n++)
                even[n] = odd[n];
}

/*
 * Take a length and build four lookup tables for applying the zeros
 * operator for that length, byte-by-byte, on the operand.
 */
static void
crc32c_zeros(uint32_t zeros[][256], size_t len)
{
        uint32_t op[32];
        uint32_t n;

        crc32c_zeros_op(op, len);
        for (n = 0; n < 256; n++) {
                zeros[0][n] = gf2_matrix_times(op, n);
                zeros[1][n] = gf2_matrix_times(op, n << 8);
                zeros[2][n] = gf2_matrix_times(op, n << 16);
                zeros[3][n] = gf2_matrix_times(op, n << 24);
        }
}

/* Apply the zeros operator table to crc. */
static inline uint32_t
crc32c_shift(uint32_t zeros[][256], uint32_t crc)
{
        return (zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
            zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24]);
}
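
/*
 * Each entry zeros[i][b] holds the operator applied to b << (8 * i), so
 * by linearity over GF(2) the four XORed lookups above equal
 * gf2_matrix_times(op, crc) without the 32-iteration bit loop.
 */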

/* Initialize tables for shifting crcs. */
static void
#ifndef _KERNEL
__attribute__((__constructor__))
#endif
crc32c_init_hw(void)
{
        crc32c_zeros(crc32c_long, LONG);
        crc32c_zeros(crc32c_2long, 2 * LONG);
        crc32c_zeros(crc32c_short, SHORT);
        crc32c_zeros(crc32c_2short, 2 * SHORT);
}
#ifdef _KERNEL
SYSINIT(crc32c_sse42, SI_SUB_LOCK, SI_ORDER_ANY, crc32c_init_hw, NULL);
#endif

/* Compute CRC-32C using the Intel hardware instruction. */
uint32_t
sse42_crc32c(uint32_t crc, const unsigned char *buf, unsigned len)
{
#ifdef __amd64__
        const size_t align = 8;
#else
        const size_t align = 4;
#endif
        const unsigned char *next, *end;
#ifdef __amd64__
        uint64_t crc0, crc1, crc2;
#else
        uint32_t crc0, crc1, crc2;
#endif

        next = buf;
        crc0 = crc;

        /* Compute the crc to bring the data pointer to an aligned boundary. */
        while (len && ((uintptr_t)next & (align - 1)) != 0) {
                crc0 = _mm_crc32_u8(crc0, *next);
                next++;
                len--;
        }

#if LONG > SHORT
        /*
         * Compute the crc on sets of LONG*3 bytes, executing three
         * independent crc instructions, each on LONG bytes -- this is
         * optimized for the Nehalem, Westmere, Sandy Bridge, and Ivy
         * Bridge architectures, which have a throughput of one crc per
         * cycle, but a latency of three cycles.
         */
        crc = 0;
        while (len >= LONG * 3) {
                crc1 = 0;
                crc2 = 0;
                end = next + LONG;
                do {
#ifdef __amd64__
                        crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
                        crc1 = _mm_crc32_u64(crc1,
                            *(const uint64_t *)(next + LONG));
                        crc2 = _mm_crc32_u64(crc2,
                            *(const uint64_t *)(next + (LONG * 2)));
#else
                        crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
                        crc1 = _mm_crc32_u32(crc1,
                            *(const uint32_t *)(next + LONG));
                        crc2 = _mm_crc32_u32(crc2,
                            *(const uint32_t *)(next + (LONG * 2)));
#endif
                        next += align;
                } while (next < end);
                /*-
                 * Update the crc.  Try to do it in parallel with the inner
                 * loop.  'crc' is used to accumulate crc0 and crc1
                 * produced by the inner loop so that the next iteration
                 * of the loop doesn't depend on anything except crc2.
                 *
                 * The full expression for the update is:
                 *         crc = S*S*S*crc + S*S*crc0 + S*crc1
                 * where the terms are polynomials modulo the CRC polynomial.
                 * We regroup this subtly as:
                 *         crc = S*S * (S*crc + crc0) + S*crc1.
                 * This has an extra dependency which reduces possible
                 * parallelism for the expression, but it turns out to be
                 * best to intentionally delay evaluation of this expression
                 * so that it competes less with the inner loop.
                 *
                 * We also intentionally reduce parallelism by feeding back
                 * crc2 to the inner loop as crc0 instead of accumulating
                 * it in crc.  This synchronizes the loop with crc update.
                 * CPU and/or compiler schedulers produced bad order without
                 * this.
                 *
                 * Shifts take about 12 cycles each, so 3 here with 2
                 * parallelizable take about 24 cycles and the crc update
                 * takes slightly longer.  8 dependent crc32 instructions
                 * can run in 24 cycles, so the 3-way blocking is worse
                 * than useless for sizes less than 8 * <word size> = 64
                 * on amd64.  In practice, SHORT = 32 confirms these
                 * timing calculations by giving a small improvement
                 * starting at size 96.  Then the inner loop takes about
                 * 12 cycles and the crc update about 24, but these are
                 * partly in parallel so the total time is less than the
                 * 36 cycles that 12 dependent crc32 instructions would
                 * take.
                 *
                 * To have a chance of completely hiding the overhead for
                 * the crc update, the inner loop must take considerably
                 * longer than 24 cycles.  LONG = 64 makes the inner loop
                 * take about 24 cycles, so is not quite large enough.
                 * LONG = 128 works OK.  Unhideable overheads are about
                 * 12 cycles per inner loop.  All assuming timing like
                 * Haswell.
                 */
                crc = crc32c_shift(crc32c_long, crc) ^ crc0;
                crc1 = crc32c_shift(crc32c_long, crc1);
                crc = crc32c_shift(crc32c_2long, crc) ^ crc1;
                crc0 = crc2;
                next += LONG * 2;
                len -= LONG * 3;
        }
        crc0 ^= crc;
#endif /* LONG > SHORT */

        /*
         * Do the same thing, but now on SHORT*3 blocks for the remaining
         * data less than a LONG*3 block.
         */
        crc = 0;
        while (len >= SHORT * 3) {
                crc1 = 0;
                crc2 = 0;
                end = next + SHORT;
                do {
#ifdef __amd64__
                        crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
                        crc1 = _mm_crc32_u64(crc1,
                            *(const uint64_t *)(next + SHORT));
                        crc2 = _mm_crc32_u64(crc2,
                            *(const uint64_t *)(next + (SHORT * 2)));
#else
                        crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
                        crc1 = _mm_crc32_u32(crc1,
                            *(const uint32_t *)(next + SHORT));
                        crc2 = _mm_crc32_u32(crc2,
                            *(const uint32_t *)(next + (SHORT * 2)));
#endif
                        next += align;
                } while (next < end);
                crc = crc32c_shift(crc32c_short, crc) ^ crc0;
                crc1 = crc32c_shift(crc32c_short, crc1);
                crc = crc32c_shift(crc32c_2short, crc) ^ crc1;
                crc0 = crc2;
                next += SHORT * 2;
                len -= SHORT * 3;
        }
        crc0 ^= crc;

        /* Compute the crc on the remaining bytes at native word size. */
        end = next + (len - (len & (align - 1)));
        while (next < end) {
#ifdef __amd64__
                crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
#else
                crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
#endif
                next += align;
        }
        len &= (align - 1);

        /* Compute the crc for any trailing bytes. */
        while (len) {
                crc0 = _mm_crc32_u8(crc0, *next);
                next++;
                len--;
        }
        return ((uint32_t)crc0);
}
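
/*
 * A minimal usage sketch, under an assumed CRC32C_SELFTEST guard that is
 * not part of the original file.  It follows the usual CRC-32C convention
 * (callers pass in the complemented running crc and complement the
 * result) and checks the standard test vector "123456789", whose CRC-32C
 * is 0xe3069283.  It requires a CPU with SSE4.2; in the userspace build,
 * the constructor above fills in the shift tables before main() runs.
 */
#ifdef CRC32C_SELFTEST
#include <stdio.h>

int
main(void)
{
        static const unsigned char msg[] = "123456789";
        uint32_t crc;

        crc = ~sse42_crc32c(~0U, msg, sizeof(msg) - 1);
        printf("crc32c(\"123456789\") = 0x%08x (expected 0xe3069283)\n", crc);
        return (crc == 0xe3069283 ? 0 : 1);
}
#endif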