/* crc32-ce-core.S */
/*
 * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions
 *
 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
 * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
 * at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *            Alexander Boyko <Alexander_Boyko@xyratex.com>
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.align		6
	.cpu		generic+crypto+crc

	/*
	 * Folding constants for the "regular" CRC32 polynomial (0xEDB88320,
	 * bit-reflected).  Laid out so that fixed offsets from the table base
	 * (loaded into x3 by the entry points below) select each constant:
	 *   +0:  R2:R1 (64-byte fold),  +16: R4:R3 (16-byte fold),
	 *   +32: R5,  +40: 32-bit mask, +48: RU:POLY (Barrett reduction).
	 */
.Lcrc32_constants:
	/*
	 * [(x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
	 * #define CONSTANT_R1  0x154442bd4LL
	 *
	 * [(x4*128-32 mod P(x) << 32)]'  << 1   = 0x1c6e41596
	 * #define CONSTANT_R2  0x1c6e41596LL
	 */
	.octa		0x00000001c6e415960000000154442bd4

	/*
	 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
	 * #define CONSTANT_R3  0x1751997d0LL
	 *
	 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
	 * #define CONSTANT_R4  0x0ccaa009eLL
	 */
	.octa		0x00000000ccaa009e00000001751997d0

	/*
	 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
	 * #define CONSTANT_R5  0x163cd6124LL
	 */
	.quad		0x0000000163cd6124
	/* mask to keep only the low 32 bits of a 64-bit lane */
	.quad		0x00000000FFFFFFFF

	/*
	 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
	 *
	 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
	 *                                        = 0x1F7011641LL
	 * #define CONSTANT_RU  0x1F7011641LL
	 */
	.octa		0x00000001F701164100000001DB710641

	/*
	 * Same table layout for the CRC32C (Castagnoli) polynomial
	 * (0x82F63B78, bit-reflected).
	 */
.Lcrc32c_constants:
	.octa		0x000000009e4addf800000000740eef02
	.octa		0x000000014cd00bd600000000f20c0dfe
	.quad		0x00000000dd45aab8
	.quad		0x00000000FFFFFFFF
	.octa		0x00000000dea713f10000000105ec76f0
	/* symbolic names for the registers used throughout the PMULL path */
	vCONSTANT	.req	v0	/* current pair of fold constants */
	dCONSTANT	.req	d0	/* low 64 bits of vCONSTANT */
	qCONSTANT	.req	q0	/* full 128 bits of vCONSTANT */

	BUF		.req	x0	/* input buffer pointer (argument 0) */
	LEN		.req	x1	/* remaining length in bytes (argument 1) */
	CRC		.req	x2	/* initial CRC value (argument 2) */

	vzr		.req	v9	/* always-zero vector, set up on entry */
/**
 * Calculate crc32
 * BUF - buffer
 * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
 * CRC - initial crc32
 * return the CRC in w0
 * uint crc32_pmull_le(unsigned char const *buffer,
 *                     size_t len, uint crc32)
 *
 * Both entry points share the body below; they differ only in which
 * constant table x3 points at.
 */
ENTRY(crc32_pmull_le)
	adr		x3, .Lcrc32_constants
	b		0f

ENTRY(crc32c_pmull_le)
	adr		x3, .Lcrc32c_constants

	/* round LEN down to a multiple of 16 and load the first 64 bytes */
0:	bic		LEN, LEN, #15
	ld1		{v1.16b-v4.16b}, [BUF], #0x40
	movi		vzr.16b, #0		/* zero register for ext shifts */
	fmov		dCONSTANT, CRC		/* initial CRC into low lane of v0 */
	eor		v1.16b, v1.16b, vCONSTANT.16b	/* xor CRC into first word */
	sub		LEN, LEN, #0x40
	cmp		LEN, #0x40
	b.lt		less_64

	/* R2:R1, the 64-byte (512-bit) folding constants */
	ldr		qCONSTANT, [x3]

loop_64:		/* 64 bytes Full cache line folding */
	sub		LEN, LEN, #0x40
	/* multiply the high and low 64-bit lanes of each vector by R2/R1 */
	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull2		v6.1q, v2.2d, vCONSTANT.2d
	pmull2		v7.1q, v3.2d, vCONSTANT.2d
	pmull2		v8.1q, v4.2d, vCONSTANT.2d

	pmull		v1.1q, v1.1d, vCONSTANT.1d
	pmull		v2.1q, v2.1d, vCONSTANT.1d
	pmull		v3.1q, v3.1d, vCONSTANT.1d
	pmull		v4.1q, v4.1d, vCONSTANT.1d

	/* fold products together and xor in the next 64 bytes of input,
	 * interleaving the loads with the eors to hide load latency */
	eor		v1.16b, v1.16b, v5.16b
	ld1		{v5.16b}, [BUF], #0x10
	eor		v2.16b, v2.16b, v6.16b
	ld1		{v6.16b}, [BUF], #0x10
	eor		v3.16b, v3.16b, v7.16b
	ld1		{v7.16b}, [BUF], #0x10
	eor		v4.16b, v4.16b, v8.16b
	ld1		{v8.16b}, [BUF], #0x10

	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	eor		v3.16b, v3.16b, v7.16b
	eor		v4.16b, v4.16b, v8.16b

	cmp		LEN, #0x40
	b.ge		loop_64

less_64:		/* Folding cache line into 128bit */
	/* R4:R3, the 16-byte (128-bit) folding constants */
	ldr		qCONSTANT, [x3, #16]

	/* fold v1 into v2, the result into v3, and that into v4 */
	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v2.16b

	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v3.16b

	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v4.16b

	cbz		LEN, fold_64

loop_16:		/* Folding rest buffer into 128bit */
	subs		LEN, LEN, #0x10
	ld1		{v2.16b}, [BUF], #0x10
	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v2.16b
	b.ne		loop_16

fold_64:
	/* perform the last 64 bit fold, also adds 32 zeroes
	 * to the input stream */
	ext		v2.16b, v1.16b, v1.16b, #8
	pmull2		v2.1q, v2.2d, vCONSTANT.2d
	ext		v1.16b, v1.16b, vzr.16b, #8
	eor		v1.16b, v1.16b, v2.16b

	/* final 32-bit fold */
	ldr		dCONSTANT, [x3, #32]	/* R5 */
	ldr		d3, [x3, #40]		/* low-32-bit mask */

	ext		v2.16b, v1.16b, vzr.16b, #4
	and		v1.16b, v1.16b, v3.16b
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v2.16b

	/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
	ldr		qCONSTANT, [x3, #48]	/* RU:POLY */

	and		v2.16b, v1.16b, v3.16b
	ext		v2.16b, vzr.16b, v2.16b, #8
	pmull2		v2.1q, v2.2d, vCONSTANT.2d	/* T1 = floor(T/x^32) * RU */
	and		v2.16b, v2.16b, v3.16b
	pmull		v2.1q, v2.1d, vCONSTANT.1d	/* T2 = T1 * P(x) */
	eor		v1.16b, v1.16b, v2.16b
	mov		w0, v1.s[1]		/* CRC resides in lane 1 */

	ret
ENDPROC(crc32_pmull_le)
ENDPROC(crc32c_pmull_le)
	/*
	 * __crc32() - CRC32 over an arbitrary-length buffer using the ARMv8
	 * scalar CRC32 instructions.
	 *
	 * w0 - running CRC value
	 * x1 - input buffer
	 * x2 - length in bytes (any value, including 0)
	 * \c - empty for CRC32, 'c' for CRC32C
	 *
	 * Main loop consumes 16 bytes per iteration; the tail peels off the
	 * remaining 8/4/2/1 bytes by testing the corresponding length bits.
	 * On big-endian, loads are byte-reversed first since the CRC32
	 * instructions operate on little-endian data.
	 */
	.macro		__crc32, c
0:	subs		x2, x2, #16
	b.mi		8f			/* < 16 bytes left: do the tail */
	ldp		x3, x4, [x1], #16
CPU_BE(	rev		x3, x3		)
CPU_BE(	rev		x4, x4		)
	crc32\c\()x	w0, w0, x3
	crc32\c\()x	w0, w0, x4
	b.ne		0b			/* loop while x2 != 0 */
	ret

	/* tail: handle bit 3 (8 bytes), bit 2 (4), bit 1 (2), bit 0 (1) */
8:	tbz		x2, #3, 4f
	ldr		x3, [x1], #8
CPU_BE(	rev		x3, x3		)
	crc32\c\()x	w0, w0, x3
4:	tbz		x2, #2, 2f
	ldr		w3, [x1], #4
CPU_BE(	rev		w3, w3		)
	crc32\c\()w	w0, w0, w3
2:	tbz		x2, #1, 1f
	ldrh		w3, [x1], #2
CPU_BE(	rev16		w3, w3		)
	crc32\c\()h	w0, w0, w3
1:	tbz		x2, #0, 0f
	ldrb		w3, [x1]
	crc32\c\()b	w0, w0, w3
0:	ret
	.endm
	/* uint crc32_armv8_le(uint crc, unsigned char const *p, size_t len) */
	.align		5
ENTRY(crc32_armv8_le)
	__crc32
ENDPROC(crc32_armv8_le)

	/* uint crc32c_armv8_le(uint crc, unsigned char const *p, size_t len) */
	.align		5
ENTRY(crc32c_armv8_le)
	__crc32		c
ENDPROC(crc32c_armv8_le)