crc32-ce-core.S

/*
 * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions
 *
 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE)
 * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
 * at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:	Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *		Alexander Boyko <Alexander_Boyko@xyratex.com>
 */
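
/*
 * For reference, both flavours accelerate the standard bit-reflected
 * CRC32. A minimal byte-at-a-time C sketch of what is being computed
 * (illustrative only, not part of the original sources; "poly" is
 * 0xEDB88320 for CRC32 and 0x82F63B78 for CRC32C, and the usual
 * initial value / final inversion are left to the caller):
 *
 *	static u32 crc32_ref(u32 crc, const u8 *p, size_t len, u32 poly)
 *	{
 *		while (len--) {
 *			crc ^= *p++;
 *			for (int i = 0; i < 8; i++)
 *				crc = (crc >> 1) ^ (poly & -(crc & 1));
 *		}
 *		return crc;
 *	}
 */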

#include <linux/linkage.h>
#include <asm/assembler.h>

	.section	".rodata", "a"
	.align		6
	.cpu		generic+crypto+crc

.Lcrc32_constants:
	/*
	 * [(x^(4*128+32) mod P(x) << 32)]' << 1 = 0x154442bd4
	 * #define CONSTANT_R1 0x154442bd4LL
	 *
	 * [(x^(4*128-32) mod P(x) << 32)]' << 1 = 0x1c6e41596
	 * #define CONSTANT_R2 0x1c6e41596LL
	 */
	.octa		0x00000001c6e415960000000154442bd4

	/*
	 * [(x^(128+32) mod P(x) << 32)]' << 1 = 0x1751997d0
	 * #define CONSTANT_R3 0x1751997d0LL
	 *
	 * [(x^(128-32) mod P(x) << 32)]' << 1 = 0x0ccaa009e
	 * #define CONSTANT_R4 0x0ccaa009eLL
	 */
	.octa		0x00000000ccaa009e00000001751997d0

	/*
	 * [(x^64 mod P(x) << 32)]' << 1 = 0x163cd6124
	 * #define CONSTANT_R5 0x163cd6124LL
	 */
	.quad		0x0000000163cd6124
	.quad		0x00000000FFFFFFFF

	/*
	 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
	 *
	 * Barrett reduction constant (u64') = u' = (x^64 / P(x))'
	 * = 0x1F7011641LL
	 * #define CONSTANT_RU 0x1F7011641LL
	 */
	.octa		0x00000001F701164100000001DB710641

.Lcrc32c_constants:
	.octa		0x000000009e4addf800000000740eef02
	.octa		0x000000014cd00bd600000000f20c0dfe
	.quad		0x00000000dd45aab8
	.quad		0x00000000FFFFFFFF
	.octa		0x00000000dea713f10000000105ec76f0
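
/*
 * .Lcrc32c_constants mirrors the layout above, with the same quantities
 * recomputed for the CRC32C (Castagnoli) polynomial 0x1EDC6F41: two
 * pairs of folding constants, the 64-bit fold constant, the 32-bit
 * mask, and (presumably in the same order) the bit-reflected polynomial
 * and Barrett constant.
 */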

	vCONSTANT	.req	v0
	dCONSTANT	.req	d0
	qCONSTANT	.req	q0

	BUF		.req	x19
	LEN		.req	x20
	CRC		.req	x21
	CONST		.req	x22

	vzr		.req	v9

/**
 * Calculate crc32
 * BUF - buffer
 * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
 * CRC - initial crc32
 * return crc32 in w0
 * uint crc32_pmull_le(unsigned char const *buffer,
 *                     size_t len, uint crc32)
 */
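
/*
 * Illustrative C-side call (a sketch only, not taken from the kernel's
 * actual glue code): the caller is expected to own the NEON unit for
 * the duration of the call and to pass a length that is a multiple of
 * 16 and at least 64.
 *
 *	if (len >= 64 && may_use_simd()) {
 *		kernel_neon_begin();
 *		crc = crc32_pmull_le(buf, round_down(len, 16), crc);
 *		kernel_neon_end();
 *	}
 */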
	.text
ENTRY(crc32_pmull_le)
	adr_l		x3, .Lcrc32_constants
	b		0f

ENTRY(crc32c_pmull_le)
	adr_l		x3, .Lcrc32c_constants

0:	frame_push	4, 64

	mov		BUF, x0
	mov		LEN, x1
	mov		CRC, x2
	mov		CONST, x3

	bic		LEN, LEN, #15
	ld1		{v1.16b-v4.16b}, [BUF], #0x40
	movi		vzr.16b, #0
	fmov		dCONSTANT, CRC
	eor		v1.16b, v1.16b, vCONSTANT.16b
	sub		LEN, LEN, #0x40
	cmp		LEN, #0x40
	b.lt		less_64

	ldr		qCONSTANT, [CONST]
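
	/*
	 * Each pass below folds the four 16-byte lanes forward over the
	 * next 64 bytes of input. Per lane, with the fold constants k2:k1
	 * held in qCONSTANT, the step computes (a sketch):
	 *
	 *	v = clmul(v.d[0], k1) ^ clmul(v.d[1], k2) ^ next_16_bytes
	 *
	 * where clmul() is the 64x64 -> 128-bit carry-less multiply
	 * provided by pmull/pmull2.
	 */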
loop_64:	/* 64 bytes: full cache-line folding */
	sub		LEN, LEN, #0x40

	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull2		v6.1q, v2.2d, vCONSTANT.2d
	pmull2		v7.1q, v3.2d, vCONSTANT.2d
	pmull2		v8.1q, v4.2d, vCONSTANT.2d

	pmull		v1.1q, v1.1d, vCONSTANT.1d
	pmull		v2.1q, v2.1d, vCONSTANT.1d
	pmull		v3.1q, v3.1d, vCONSTANT.1d
	pmull		v4.1q, v4.1d, vCONSTANT.1d

	eor		v1.16b, v1.16b, v5.16b
	ld1		{v5.16b}, [BUF], #0x10
	eor		v2.16b, v2.16b, v6.16b
	ld1		{v6.16b}, [BUF], #0x10
	eor		v3.16b, v3.16b, v7.16b
	ld1		{v7.16b}, [BUF], #0x10
	eor		v4.16b, v4.16b, v8.16b
	ld1		{v8.16b}, [BUF], #0x10

	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	eor		v3.16b, v3.16b, v7.16b
	eor		v4.16b, v4.16b, v8.16b

	cmp		LEN, #0x40
	b.lt		less_64

	if_will_cond_yield_neon
	stp		q1, q2, [sp, #.Lframe_local_offset]
	stp		q3, q4, [sp, #.Lframe_local_offset + 32]
	do_cond_yield_neon
	ldp		q1, q2, [sp, #.Lframe_local_offset]
	ldp		q3, q4, [sp, #.Lframe_local_offset + 32]
	ldr		qCONSTANT, [CONST]
	movi		vzr.16b, #0
	endif_yield_neon
	b		loop_64

less_64:	/* fold the 64-byte cache line into 128 bits */
	ldr		qCONSTANT, [CONST, #16]

	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v2.16b

	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v3.16b

	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v4.16b

	cbz		LEN, fold_64

loop_16:	/* fold the rest of the buffer into 128 bits, 16 bytes at a time */
	subs		LEN, LEN, #0x10
	ld1		{v2.16b}, [BUF], #0x10
	pmull2		v5.1q, v1.2d, vCONSTANT.2d
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v5.16b
	eor		v1.16b, v1.16b, v2.16b
	b.ne		loop_16

fold_64:
	/* perform the last 64-bit fold, which also appends 32 zero bits
	 * to the input stream */
	ext		v2.16b, v1.16b, v1.16b, #8
	pmull2		v2.1q, v2.2d, vCONSTANT.2d
	ext		v1.16b, v1.16b, vzr.16b, #8
	eor		v1.16b, v1.16b, v2.16b

	/* final 32-bit fold */
	ldr		dCONSTANT, [CONST, #32]
	ldr		d3, [CONST, #40]

	ext		v2.16b, v1.16b, vzr.16b, #4
	and		v1.16b, v1.16b, v3.16b
	pmull		v1.1q, v1.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v2.16b

	/* finish up with the bit-reversed Barrett reduction, 64 ==> 32 bits */
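
	/*
	 * A sketch of the reduction performed below, in the bit-reflected
	 * domain: with u' = (x^64 / P(x))' in the high half of qCONSTANT
	 * and P'(x) in its low half, the 64-bit remainder R is reduced as
	 *
	 *	T1  = (R mod x^32) * u'		// pmull2
	 *	T2  = (T1 mod x^32) * P'(x)	// pmull
	 *	crc = (R ^ T2) >> 32		// eor, then take v1.s[1]
	 *
	 * where '*' denotes carry-less multiplication.
	 */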
	ldr		qCONSTANT, [CONST, #48]

	and		v2.16b, v1.16b, v3.16b
	ext		v2.16b, vzr.16b, v2.16b, #8
	pmull2		v2.1q, v2.2d, vCONSTANT.2d
	and		v2.16b, v2.16b, v3.16b
	pmull		v2.1q, v2.1d, vCONSTANT.1d
	eor		v1.16b, v1.16b, v2.16b
	mov		w0, v1.s[1]

	frame_pop
	ret
ENDPROC(crc32_pmull_le)
ENDPROC(crc32c_pmull_le)

	.macro		__crc32, c
0:	subs		x2, x2, #16
	b.mi		8f
	ldp		x3, x4, [x1], #16
CPU_BE(	rev		x3, x3		)
CPU_BE(	rev		x4, x4		)
	crc32\c\()x	w0, w0, x3
	crc32\c\()x	w0, w0, x4
	b.ne		0b
	ret

8:	tbz		x2, #3, 4f
	ldr		x3, [x1], #8
CPU_BE(	rev		x3, x3		)
	crc32\c\()x	w0, w0, x3
4:	tbz		x2, #2, 2f
	ldr		w3, [x1], #4
CPU_BE(	rev		w3, w3		)
	crc32\c\()w	w0, w0, w3
2:	tbz		x2, #1, 1f
	ldrh		w3, [x1], #2
CPU_BE(	rev16		w3, w3		)
	crc32\c\()h	w0, w0, w3
1:	tbz		x2, #0, 0f
	ldrb		w3, [x1]
	crc32\c\()b	w0, w0, w3
0:	ret
	.endm
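
/*
 * For comparison, the macro above is roughly what a C loop using the
 * ACLE CRC32 intrinsics from <arm_acle.h> would compute (a sketch under
 * that assumption, shown for the plain CRC32 flavour; the CRC32C
 * flavour would use __crc32cd/__crc32cw/__crc32ch/__crc32cb instead):
 *
 *	u32 crc32_scalar(u32 crc, const u8 *p, size_t len)
 *	{
 *		while (len >= 8) {
 *			crc = __crc32d(crc, get_unaligned_le64(p));
 *			p += 8;
 *			len -= 8;
 *		}
 *		if (len & 4) {
 *			crc = __crc32w(crc, get_unaligned_le32(p));
 *			p += 4;
 *		}
 *		if (len & 2) {
 *			crc = __crc32h(crc, get_unaligned_le16(p));
 *			p += 2;
 *		}
 *		if (len & 1)
 *			crc = __crc32b(crc, *p);
 *		return crc;
 *	}
 */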

	.align		5
ENTRY(crc32_armv8_le)
	__crc32
ENDPROC(crc32_armv8_le)

	.align		5
ENTRY(crc32c_armv8_le)
	__crc32		c
ENDPROC(crc32c_armv8_le)