
/*
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright (c) 2012, Intel Corporation
;
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are
; met:
;
; * Redistributions of source code must retain the above copyright
;   notice, this list of conditions and the following disclaimer.
;
; * Redistributions in binary form must reproduce the above copyright
;   notice, this list of conditions and the following disclaimer in the
;   documentation and/or other materials provided with the
;   distribution.
;
; * Neither the name of the Intel Corporation nor the names of its
;   contributors may be used to endorse or promote products derived from
;   this software without specific prior written permission.
;
;
; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
*/
/*
 * Conversion to GAS assembly and integration to libgcrypt
 *  by Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * Note: the original implementation was named SHA512-SSE4; however, only
 * SSSE3 is actually required.
 */
#ifdef __x86_64
#include <config.h>
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
    defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
    defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA512)

#include "asm-common-amd64.h"

.intel_syntax noprefix

.text
/* Virtual Registers */
#define msg	rdi /* ARG1 */
#define digest	rsi /* ARG2 */
#define msglen	rdx /* ARG3 */
#define T1	rcx
#define T2	r8
#define a_64	r9
#define b_64	r10
#define c_64	r11
#define d_64	r12
#define e_64	r13
#define f_64	r14
#define g_64	r15
#define h_64	rbx
#define tmp0	rax
/*
; Local variables (stack frame)
; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP
*/
#define frame_W		0 /* Message Schedule */
#define frame_W_size	(80 * 8)
#define frame_WK	((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */
#define frame_WK_size	(2 * 8)
#define frame_GPRSAVE	((frame_WK) + (frame_WK_size))
#define frame_GPRSAVE_size (5 * 8)
#define frame_size	((frame_GPRSAVE) + (frame_GPRSAVE_size))
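
/*
 * A short sanity check on the "odd multiple of 8" note above (not in
 * the original source): per the AMD64 ABI, RSP % 16 == 8 on function
 * entry (16-byte aligned at the call site, minus 8 for the pushed
 * return address).  frame_size = 80*8 + 2*8 + 5*8 = 696 = 87 * 8 with
 * 87 odd, so "sub rsp, frame_size" below leaves RSP % 16 == 0, which
 * the aligned movdqa accesses to W_t(i)/WK_2(i) depend on.
 */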
/* Useful QWORD "arrays" for simpler memory references */
#define MSG(i)    msg + 8*(i)               /* Input message (arg1) */
#define DIGEST(i) digest + 8*(i)            /* Output Digest (arg2) */
#define K_t(i)    .LK512 + 8*(i) ADD_RIP    /* SHA Constants (static mem) */
#define W_t(i)    rsp + frame_W + 8*(i)     /* Message Schedule (stack frame) */
#define WK_2(i)   rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */
/* MSG, DIGEST, K_t, W_t are arrays */
/* WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even */
#define SHA512_Round(t, a, b, c, d, e, f, g, h) \
	/* Compute Round %%t */; \
	mov	T1, f		/* T1 = f */; \
	mov	tmp0, e		/* tmp = e */; \
	xor	T1, g		/* T1 = f ^ g */; \
	ror	tmp0, 23	/* 41 ; tmp = e ror 23 */; \
	and	T1, e		/* T1 = (f ^ g) & e */; \
	xor	tmp0, e		/* tmp = (e ror 23) ^ e */; \
	xor	T1, g		/* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */; \
	add	T1, [WK_2(t)]	/* W[t] + K[t] from message scheduler */; \
	ror	tmp0, 4		/* 18 ; tmp = ((e ror 23) ^ e) ror 4 */; \
	xor	tmp0, e		/* tmp = (((e ror 23) ^ e) ror 4) ^ e */; \
	mov	T2, a		/* T2 = a */; \
	add	T1, h		/* T1 = CH(e,f,g) + W[t] + K[t] + h */; \
	ror	tmp0, 14	/* 14 ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */; \
	add	T1, tmp0	/* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */; \
	mov	tmp0, a		/* tmp = a */; \
	xor	T2, c		/* T2 = a ^ c */; \
	and	tmp0, c		/* tmp = a & c */; \
	and	T2, b		/* T2 = (a ^ c) & b */; \
	xor	T2, tmp0	/* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */; \
	mov	tmp0, a		/* tmp = a */; \
	ror	tmp0, 5		/* 39 ; tmp = a ror 5 */; \
	xor	tmp0, a		/* tmp = (a ror 5) ^ a */; \
	add	d, T1		/* e(next_state) = d + T1 */; \
	ror	tmp0, 6		/* 34 ; tmp = ((a ror 5) ^ a) ror 6 */; \
	xor	tmp0, a		/* tmp = (((a ror 5) ^ a) ror 6) ^ a */; \
	lea	h, [T1 + T2]	/* a(next_state) = T1 + Maj(a,b,c) */; \
	ror	tmp0, 28	/* 28 ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */; \
	add	h, tmp0		/* a(next_state) = T1 + Maj(a,b,c) + S0(a) */
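
/*
 * For reference, a plain-C sketch (not part of the original source) of
 * what one SHA512_Round invocation computes; ROTR64 is a hypothetical
 * 64-bit rotate-right helper:
 *
 *   uint64_t ch  = ((f ^ g) & e) ^ g;                          // CH(e,f,g)
 *   uint64_t maj = ((a ^ c) & b) ^ (a & c);                    // Maj(a,b,c)
 *   uint64_t s1  = ROTR64(e,14) ^ ROTR64(e,18) ^ ROTR64(e,41); // S1(e)
 *   uint64_t s0  = ROTR64(a,28) ^ ROTR64(a,34) ^ ROTR64(a,39); // S0(a)
 *   uint64_t T1  = ch + (W[t] + K[t]) + h + s1;
 *   uint64_t T2  = s0 + maj;
 *   d += T1;       // becomes e of the next round
 *   h  = T1 + T2;  // becomes a of the next round
 *
 * The ror/xor chains above need no extra registers for S1/S0:
 * ((((e ror 23) ^ e) ror 4) ^ e) ror 14 equals
 * ROTR64(e,41) ^ ROTR64(e,18) ^ ROTR64(e,14) = S1(e).
 */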
#define SHA512_2Sched_2Round_sse_PART1(t, a, b, c, d, e, f, g, h) \
	/* \
	; Compute rounds %%t-2 and %%t-1 \
	; Compute message schedule QWORDS %%t and %%t+1 \
	; \
	; Two rounds are computed based on the values for K[t-2]+W[t-2] and \
	; K[t-1]+W[t-1] which were previously stored at WK_2 by the message \
	; scheduler. \
	; The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. \
	; They are then added to their respective SHA512 constants at \
	; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)]. \
	; For brevity, the comments following vectored instructions only refer to \
	; the first of a pair of QWORDS. \
	; E.g. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}. \
	; The computation of the message schedule and the rounds are tightly \
	; stitched to take advantage of instruction-level parallelism. \
	; For clarity, integer instructions (for the rounds calculation) are indented \
	; by one tab. Vectored instructions (for the message scheduler) are indented \
	; by two tabs. \
	*/ \
	\
	mov	T1, f; \
		movdqa	xmm2, [W_t(t-2)]	/* XMM2 = W[t-2] */; \
	xor	T1, g; \
	and	T1, e; \
		movdqa	xmm0, xmm2		/* XMM0 = W[t-2] */; \
	xor	T1, g; \
	add	T1, [WK_2(t)]; \
		movdqu	xmm5, [W_t(t-15)]	/* XMM5 = W[t-15] */; \
	mov	tmp0, e; \
	ror	tmp0, 23 /* 41 */; \
		movdqa	xmm3, xmm5		/* XMM3 = W[t-15] */; \
	xor	tmp0, e; \
	ror	tmp0, 4 /* 18 */; \
		psrlq	xmm0, 61 - 19		/* XMM0 = W[t-2] >> 42 */; \
	xor	tmp0, e; \
	ror	tmp0, 14 /* 14 */; \
		psrlq	xmm3, (8 - 7)		/* XMM3 = W[t-15] >> 1 */; \
	add	T1, tmp0; \
	add	T1, h; \
		pxor	xmm0, xmm2		/* XMM0 = (W[t-2] >> 42) ^ W[t-2] */; \
	mov	T2, a; \
	xor	T2, c; \
		pxor	xmm3, xmm5		/* XMM3 = (W[t-15] >> 1) ^ W[t-15] */; \
	and	T2, b; \
	mov	tmp0, a; \
		psrlq	xmm0, 19 - 6		/* XMM0 = ((W[t-2]>>42)^W[t-2])>>13 */; \
	and	tmp0, c; \
	xor	T2, tmp0; \
		psrlq	xmm3, (7 - 1)		/* XMM3 = ((W[t-15]>>1)^W[t-15])>>6 */; \
	mov	tmp0, a; \
	ror	tmp0, 5 /* 39 */; \
		pxor	xmm0, xmm2		/* XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2] */; \
	xor	tmp0, a; \
	ror	tmp0, 6 /* 34 */; \
		pxor	xmm3, xmm5		/* XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15] */; \
	xor	tmp0, a; \
	ror	tmp0, 28 /* 28 */; \
		psrlq	xmm0, 6			/* XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6 */; \
	add	T2, tmp0; \
	add	d, T1; \
		psrlq	xmm3, 1			/* XMM3 = ((((W[t-15]>>1)^W[t-15])>>6)^W[t-15])>>1 */; \
	lea	h, [T1 + T2]
#define SHA512_2Sched_2Round_sse_PART2(t, a, b, c, d, e, f, g, h) \
		movdqa	xmm1, xmm2		/* XMM1 = W[t-2] */; \
	mov	T1, f; \
	xor	T1, g; \
		movdqa	xmm4, xmm5		/* XMM4 = W[t-15] */; \
	and	T1, e; \
	xor	T1, g; \
		psllq	xmm1, (64 - 19) - (64 - 61)	/* XMM1 = W[t-2] << 42 */; \
	add	T1, [WK_2(t+1)]; \
	mov	tmp0, e; \
		psllq	xmm4, (64 - 1) - (64 - 8)	/* XMM4 = W[t-15] << 7 */; \
	ror	tmp0, 23 /* 41 */; \
	xor	tmp0, e; \
		pxor	xmm1, xmm2		/* XMM1 = (W[t-2] << 42)^W[t-2] */; \
	ror	tmp0, 4 /* 18 */; \
	xor	tmp0, e; \
		pxor	xmm4, xmm5		/* XMM4 = (W[t-15]<<7)^W[t-15] */; \
	ror	tmp0, 14 /* 14 */; \
	add	T1, tmp0; \
		psllq	xmm1, (64 - 61)		/* XMM1 = ((W[t-2] << 42)^W[t-2])<<3 */; \
	add	T1, h; \
	mov	T2, a; \
		psllq	xmm4, (64 - 8)		/* XMM4 = ((W[t-15]<<7)^W[t-15])<<56 */; \
	xor	T2, c; \
	and	T2, b; \
		pxor	xmm0, xmm1		/* XMM0 = s1(W[t-2]) */; \
	mov	tmp0, a; \
	and	tmp0, c; \
		movdqu	xmm1, [W_t(t-7)]	/* XMM1 = W[t-7] */; \
	xor	T2, tmp0; \
		pxor	xmm3, xmm4		/* XMM3 = s0(W[t-15]) */; \
	mov	tmp0, a; \
		paddq	xmm0, xmm3		/* XMM0 = s1(W[t-2]) + s0(W[t-15]) */; \
	ror	tmp0, 5 /* 39 */; \
		paddq	xmm0, [W_t(t-16)]	/* XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16] */; \
	xor	tmp0, a; \
		paddq	xmm0, xmm1		/* XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */; \
	ror	tmp0, 6 /* 34 */; \
		movdqa	[W_t(t)], xmm0		/* Store scheduled qwords */; \
	xor	tmp0, a; \
		paddq	xmm0, [K_t(t)]		/* Compute W[t]+K[t] */; \
	ror	tmp0, 28 /* 28 */; \
		movdqa	[WK_2(t)], xmm0		/* Store W[t]+K[t] for next rounds */; \
	add	T2, tmp0; \
	add	d, T1; \
	lea	h, [T1 + T2]
#define SHA512_2Sched_2Round_sse(t, a, b, c, d, e, f, g, h) \
	SHA512_2Sched_2Round_sse_PART1(t, a, b, c, d, e, f, g, h); \
	SHA512_2Sched_2Round_sse_PART2(t, h, a, b, c, d, e, f, g)
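
/*
 * Background on the psrlq/psllq chains (not in the original comments):
 * SSE2 has no 64-bit SIMD rotate, so each ROTR in the schedule sigmas
 * is split into a right-shift half and a left-shift half, with the
 * shift amounts staggered so the terms share partial results.  For
 * s1(x) = ROTR(x,19) ^ ROTR(x,61) ^ (x >> 6), the XMM0/XMM1 chains in
 * PART1/PART2 compute
 *
 *   XMM0 = ((((x >> 42) ^ x) >> 13) ^ x) >> 6
 *        = (x >> 61) ^ (x >> 19) ^ (x >> 6)   // low halves + plain shift
 *   XMM1 = (((x << 42) ^ x) << 3)
 *        = (x << 45) ^ (x << 3)               // high halves of the rotates
 *   s1(x) = XMM0 ^ XMM1
 *
 * The s0(W[t-15]) computation in XMM3/XMM4 follows the same pattern
 * with rotate amounts 1 and 8 and plain shift 7.
 */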
/*
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; void sha512_sse4(const void* M, void* D, uint64_t L);
; Purpose: Updates the SHA512 digest stored at D with the message stored in M.
; The size of the message pointed to by M must be an integer multiple of the
; SHA512 message block size (128 bytes).
; L is the message length in SHA512 blocks.
*/
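
/*
 * The prototype above is kept from the original Intel code; on the
 * libgcrypt side this entry point is declared roughly as follows (an
 * assumption for illustration; the authoritative declaration lives in
 * cipher/sha512.c):
 *
 *   unsigned int _gcry_sha512_transform_amd64_ssse3(const void *msg,
 *                                                   void *digest,
 *                                                   size_t nblks);
 *
 * EAX is zeroed before returning (see the stack-burn code below), so
 * the caller has no additional stack of this function's to wipe.
 */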
.globl _gcry_sha512_transform_amd64_ssse3
ELF(.type _gcry_sha512_transform_amd64_ssse3,@function;)
.align 16
_gcry_sha512_transform_amd64_ssse3:
	CFI_STARTPROC()
	xor	eax, eax

	cmp	msglen, 0
	je	.Lnowork

	/* Allocate Stack Space */
	sub	rsp, frame_size
	CFI_ADJUST_CFA_OFFSET(frame_size);

	/* Save GPRs */
	mov	[rsp + frame_GPRSAVE + 8 * 0], rbx
	mov	[rsp + frame_GPRSAVE + 8 * 1], r12
	mov	[rsp + frame_GPRSAVE + 8 * 2], r13
	mov	[rsp + frame_GPRSAVE + 8 * 3], r14
	mov	[rsp + frame_GPRSAVE + 8 * 4], r15
	CFI_REL_OFFSET(rbx, frame_GPRSAVE + 8 * 0);
	CFI_REL_OFFSET(r12, frame_GPRSAVE + 8 * 1);
	CFI_REL_OFFSET(r13, frame_GPRSAVE + 8 * 2);
	CFI_REL_OFFSET(r14, frame_GPRSAVE + 8 * 3);
	CFI_REL_OFFSET(r15, frame_GPRSAVE + 8 * 4);
.Lupdateblock:

	/* Load state variables */
	mov	a_64, [DIGEST(0)]
	mov	b_64, [DIGEST(1)]
	mov	c_64, [DIGEST(2)]
	mov	d_64, [DIGEST(3)]
	mov	e_64, [DIGEST(4)]
	mov	f_64, [DIGEST(5)]
	mov	g_64, [DIGEST(6)]
	mov	h_64, [DIGEST(7)]

	/* BSWAP 2 QWORDS */
	movdqa	xmm1, [.LXMM_QWORD_BSWAP ADD_RIP]
	movdqu	xmm0, [MSG(0)]
	pshufb	xmm0, xmm1		/* BSWAP */
	movdqa	[W_t(0)], xmm0		/* Store Scheduled Pair */
	paddq	xmm0, [K_t(0)]		/* Compute W[t]+K[t] */
	movdqa	[WK_2(0)], xmm0		/* Store into WK for rounds */
#define T_2_14(t, a, b, c, d, e, f, g, h) \
	/* BSWAP 2 QWORDS; Compute 2 Rounds */; \
	movdqu	xmm0, [MSG(t)]; \
	pshufb	xmm0, xmm1		/* BSWAP */; \
	SHA512_Round(((t) - 2), a##_64, b##_64, c##_64, d##_64, \
		     e##_64, f##_64, g##_64, h##_64); \
	movdqa	[W_t(t)], xmm0		/* Store Scheduled Pair */; \
	paddq	xmm0, [K_t(t)]		/* Compute W[t]+K[t] */; \
	SHA512_Round(((t) - 1), h##_64, a##_64, b##_64, c##_64, \
		     d##_64, e##_64, f##_64, g##_64); \
	movdqa	[WK_2(t)], xmm0		/* Store W[t]+K[t] into WK */

#define T_16_78(t, a, b, c, d, e, f, g, h) \
	SHA512_2Sched_2Round_sse((t), a##_64, b##_64, c##_64, d##_64, \
				 e##_64, f##_64, g##_64, h##_64)

#define T_80(t, a, b, c, d, e, f, g, h) \
	/* Compute 2 Rounds */; \
	SHA512_Round((t - 2), a##_64, b##_64, c##_64, d##_64, \
		     e##_64, f##_64, g##_64, h##_64); \
	SHA512_Round((t - 1), h##_64, a##_64, b##_64, c##_64, \
		     d##_64, e##_64, f##_64, g##_64)
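
/*
 * Note on the invocation pattern below (not in the original comments):
 * each T_* macro covers two rounds, so instead of shuffling the state
 * between registers, successive calls rotate the a..h argument list
 * backwards by two positions; e.g. the register that served as g in
 * T_2_14(2, a, ..., h) serves as a in T_2_14(4, g, h, a, ...).
 */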
	T_2_14(2, a, b, c, d, e, f, g, h)
	T_2_14(4, g, h, a, b, c, d, e, f)
	T_2_14(6, e, f, g, h, a, b, c, d)
	T_2_14(8, c, d, e, f, g, h, a, b)
	T_2_14(10, a, b, c, d, e, f, g, h)
	T_2_14(12, g, h, a, b, c, d, e, f)
	T_2_14(14, e, f, g, h, a, b, c, d)
	T_16_78(16, c, d, e, f, g, h, a, b)
	T_16_78(18, a, b, c, d, e, f, g, h)
	T_16_78(20, g, h, a, b, c, d, e, f)
	T_16_78(22, e, f, g, h, a, b, c, d)
	T_16_78(24, c, d, e, f, g, h, a, b)
	T_16_78(26, a, b, c, d, e, f, g, h)
	T_16_78(28, g, h, a, b, c, d, e, f)
	T_16_78(30, e, f, g, h, a, b, c, d)
	T_16_78(32, c, d, e, f, g, h, a, b)
	T_16_78(34, a, b, c, d, e, f, g, h)
	T_16_78(36, g, h, a, b, c, d, e, f)
	T_16_78(38, e, f, g, h, a, b, c, d)
	T_16_78(40, c, d, e, f, g, h, a, b)
	T_16_78(42, a, b, c, d, e, f, g, h)
	T_16_78(44, g, h, a, b, c, d, e, f)
	T_16_78(46, e, f, g, h, a, b, c, d)
	T_16_78(48, c, d, e, f, g, h, a, b)
	T_16_78(50, a, b, c, d, e, f, g, h)
	T_16_78(52, g, h, a, b, c, d, e, f)
	T_16_78(54, e, f, g, h, a, b, c, d)
	T_16_78(56, c, d, e, f, g, h, a, b)
	T_16_78(58, a, b, c, d, e, f, g, h)
	T_16_78(60, g, h, a, b, c, d, e, f)
	T_16_78(62, e, f, g, h, a, b, c, d)
	T_16_78(64, c, d, e, f, g, h, a, b)
	T_16_78(66, a, b, c, d, e, f, g, h)
	T_16_78(68, g, h, a, b, c, d, e, f)
	T_16_78(70, e, f, g, h, a, b, c, d)
	T_16_78(72, c, d, e, f, g, h, a, b)
	T_16_78(74, a, b, c, d, e, f, g, h)
	T_16_78(76, g, h, a, b, c, d, e, f)
	T_16_78(78, e, f, g, h, a, b, c, d)
	T_80(80, c, d, e, f, g, h, a, b)
	/* Update digest */
	add	[DIGEST(0)], a_64
	add	[DIGEST(1)], b_64
	add	[DIGEST(2)], c_64
	add	[DIGEST(3)], d_64
	add	[DIGEST(4)], e_64
	add	[DIGEST(5)], f_64
	add	[DIGEST(6)], g_64
	add	[DIGEST(7)], h_64

	/* Advance to next message block */
	add	msg, 16*8
	dec	msglen
	jnz	.Lupdateblock

	/* Restore GPRs */
	mov	rbx, [rsp + frame_GPRSAVE + 8 * 0]
	mov	r12, [rsp + frame_GPRSAVE + 8 * 1]
	mov	r13, [rsp + frame_GPRSAVE + 8 * 2]
	mov	r14, [rsp + frame_GPRSAVE + 8 * 3]
	mov	r15, [rsp + frame_GPRSAVE + 8 * 4]
	CFI_RESTORE(rbx)
	CFI_RESTORE(r12)
	CFI_RESTORE(r13)
	CFI_RESTORE(r14)
	CFI_RESTORE(r15)

	pxor	xmm0, xmm0
	pxor	xmm1, xmm1
	pxor	xmm2, xmm2
	pxor	xmm3, xmm3
	pxor	xmm4, xmm4
	pxor	xmm5, xmm5

	/* Burn stack */
	mov	eax, 0
.Lerase_stack:
	movdqu	[rsp + rax], xmm0
	add	eax, 16
	cmp	eax, frame_W_size
	jne	.Lerase_stack
	movdqu	[rsp + frame_WK], xmm0
	xor	eax, eax

	/* Restore Stack Pointer */
	add	rsp, frame_size
	CFI_ADJUST_CFA_OFFSET(-frame_size);

.Lnowork:
	ret_spec_stop
	CFI_ENDPROC()
/*
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; Binary Data
*/

SECTION_RODATA

ELF(.type _sha512_ssse3_consts,@object)
_sha512_ssse3_consts:

.align 16

/* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */
.LXMM_QWORD_BSWAP:
	.octa 0x08090a0b0c0d0e0f0001020304050607
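
/*
 * Byte-wise from the low end this mask reads 07 06 05 04 03 02 01 00
 * 0f 0e 0d 0c 0b 0a 09 08, so pshufb reverses the bytes within each of
 * the two qwords, turning the big-endian message words into host order.
 */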
/* K[t] used in SHA512 hashing */
.LK512:
	.quad 0x428a2f98d728ae22,0x7137449123ef65cd
	.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad 0x3956c25bf348b538,0x59f111f1b605d019
	.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad 0xd807aa98a3030242,0x12835b0145706fbe
	.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad 0x9bdc06a725c71235,0xc19bf174cf692694
	.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad 0x983e5152ee66dfab,0xa831c66d2db43210
	.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad 0x06ca6351e003826f,0x142929670a0e6e70
	.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad 0x81c2c92e47edaee6,0x92722c851482353b
	.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad 0xd192e819d6ef5218,0xd69906245565a910
	.quad 0xf40e35855771202a,0x106aa07032bbd1b8
	.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad 0x90befffa23631e28,0xa4506cebde82bde9
	.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad 0xca273eceea26619c,0xd186b8c721c0c207
	.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad 0x113f9804bef90dae,0x1b710b35131c471b
	.quad 0x28db77f523047d84,0x32caab7b40c72493
	.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
#endif /* assembler and USE_SHA512 checks */
#endif /* __x86_64 */