sm3-avx-bmi2-amd64.S

/* sm3-avx-bmi2-amd64.S - Intel AVX/BMI2 accelerated SM3 transform function
 * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifdef __x86_64__
#include <config.h>

#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
    defined(HAVE_GCC_INLINE_ASM_AVX) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \
    defined(USE_SM3)

#include "asm-common-amd64.h"

/* Context structure */

#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
#define state_h5 20
#define state_h6 24
#define state_h7 28

/* Constants */

SECTION_RODATA
.align 16
ELF(.type _gcry_sm3_avx2_consts,@object)
_gcry_sm3_avx2_consts:
.Lbe32mask:
        .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
ELF(.size _gcry_sm3_avx2_consts,.-_gcry_sm3_avx2_consts)
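
/* .Lbe32mask is the vpshufb control mask used below to byte-swap each 32-bit
 * lane, converting the big-endian message words of the input block into host
 * byte order before they are stored to the stack. */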

/* Round constant macros */

#define K0 2043430169 /* 0x79cc4519 */
#define K1 -208106958 /* 0xf3988a32 */
#define K2 -416213915 /* 0xe7311465 */
#define K3 -832427829 /* 0xce6228cb */
#define K4 -1664855657 /* 0x9cc45197 */
#define K5 965255983 /* 0x3988a32f */
#define K6 1930511966 /* 0x7311465e */
#define K7 -433943364 /* 0xe6228cbc */
#define K8 -867886727 /* 0xcc451979 */
#define K9 -1735773453 /* 0x988a32f3 */
#define K10 823420391 /* 0x311465e7 */
#define K11 1646840782 /* 0x6228cbce */
#define K12 -1001285732 /* 0xc451979c */
#define K13 -2002571463 /* 0x88a32f39 */
#define K14 289824371 /* 0x11465e73 */
#define K15 579648742 /* 0x228cbce6 */
#define K16 -1651869049 /* 0x9d8a7a87 */
#define K17 991229199 /* 0x3b14f50f */
#define K18 1982458398 /* 0x7629ea1e */
#define K19 -330050500 /* 0xec53d43c */
#define K20 -660100999 /* 0xd8a7a879 */
#define K21 -1320201997 /* 0xb14f50f3 */
#define K22 1654563303 /* 0x629ea1e7 */
#define K23 -985840690 /* 0xc53d43ce */
#define K24 -1971681379 /* 0x8a7a879d */
#define K25 351604539 /* 0x14f50f3b */
#define K26 703209078 /* 0x29ea1e76 */
#define K27 1406418156 /* 0x53d43cec */
#define K28 -1482130984 /* 0xa7a879d8 */
#define K29 1330705329 /* 0x4f50f3b1 */
#define K30 -1633556638 /* 0x9ea1e762 */
#define K31 1027854021 /* 0x3d43cec5 */
#define K32 2055708042 /* 0x7a879d8a */
#define K33 -183551212 /* 0xf50f3b14 */
#define K34 -367102423 /* 0xea1e7629 */
#define K35 -734204845 /* 0xd43cec53 */
#define K36 -1468409689 /* 0xa879d8a7 */
#define K37 1358147919 /* 0x50f3b14f */
#define K38 -1578671458 /* 0xa1e7629e */
#define K39 1137624381 /* 0x43cec53d */
#define K40 -2019718534 /* 0x879d8a7a */
#define K41 255530229 /* 0x0f3b14f5 */
#define K42 511060458 /* 0x1e7629ea */
#define K43 1022120916 /* 0x3cec53d4 */
#define K44 2044241832 /* 0x79d8a7a8 */
#define K45 -206483632 /* 0xf3b14f50 */
#define K46 -412967263 /* 0xe7629ea1 */
#define K47 -825934525 /* 0xcec53d43 */
#define K48 -1651869049 /* 0x9d8a7a87 */
#define K49 991229199 /* 0x3b14f50f */
#define K50 1982458398 /* 0x7629ea1e */
#define K51 -330050500 /* 0xec53d43c */
#define K52 -660100999 /* 0xd8a7a879 */
#define K53 -1320201997 /* 0xb14f50f3 */
#define K54 1654563303 /* 0x629ea1e7 */
#define K55 -985840690 /* 0xc53d43ce */
#define K56 -1971681379 /* 0x8a7a879d */
#define K57 351604539 /* 0x14f50f3b */
#define K58 703209078 /* 0x29ea1e76 */
#define K59 1406418156 /* 0x53d43cec */
#define K60 -1482130984 /* 0xa7a879d8 */
#define K61 1330705329 /* 0x4f50f3b1 */
#define K62 -1633556638 /* 0x9ea1e762 */
#define K63 1027854021 /* 0x3d43cec5 */
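
/* The 64 constants above are K_j = ROTL32(T_j, j mod 32), with T_j = 0x79cc4519
 * for rounds 0-15 and T_j = 0x7a879d8a for rounds 16-63.  They are written as
 * signed decimals because they are used as 32-bit displacements in the
 * leal K##round(...) addressing of the round macro, and displacements are
 * sign-extended immediates.  A minimal C sketch to regenerate the table (the
 * helper name rotl32 is ours, not part of this file):
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   static uint32_t rotl32(uint32_t x, unsigned n)
 *   {
 *     n &= 31;
 *     return n ? (x << n) | (x >> (32 - n)) : x;
 *   }
 *
 *   int main(void)
 *   {
 *     for (int j = 0; j < 64; j++)
 *       printf("#define K%d %d\n", j,
 *              (int32_t)rotl32(j < 16 ? 0x79cc4519 : 0x7a879d8a, (unsigned)j));
 *     return 0;
 *   }
 */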

/* Register macros */

#define RSTATE %rdi
#define RDATA  %rsi
#define RNBLKS %rdx

#define t0 %eax
#define t1 %ebx
#define t2 %ecx

#define a %r8d
#define b %r9d
#define c %r10d
#define d %r11d
#define e %r12d
#define f %r13d
#define g %r14d
#define h %r15d

#define W0 %xmm0
#define W1 %xmm1
#define W2 %xmm2
#define W3 %xmm3
#define W4 %xmm4
#define W5 %xmm5

#define XTMP0 %xmm6
#define XTMP1 %xmm7
#define XTMP2 %xmm8
#define XTMP3 %xmm9
#define XTMP4 %xmm10
#define XTMP5 %xmm11
#define XTMP6 %xmm12

#define BSWAP_REG %xmm15

/* Stack structure */

#define STACK_W_SIZE        (32 * 2 * 3)
#define STACK_REG_SAVE_SIZE (64)

#define STACK_W             (0)
#define STACK_REG_SAVE      (STACK_W + STACK_W_SIZE)
#define STACK_SIZE          (STACK_REG_SAVE + STACK_REG_SAVE_SIZE)
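
/* STACK_W reserves 192 bytes (three 64-byte slots) for the byte-swapped and
 * expanded message words, addressed through IW_W_ADDR/XW_W_ADDR below;
 * STACK_REG_SAVE reserves 64 bytes for the callee-saved general-purpose
 * registers spilled in the prologue. */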

/* Instruction helpers. */

#define roll2(v, reg) \
        roll $(v), reg;

#define roll3mov(v, src, dst) \
        movl src, dst; \
        roll $(v), dst;

#define roll3(v, src, dst) \
        rorxl $(32-(v)), src, dst;

#define addl2(a, out) \
        leal (a, out), out;
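
/* roll3 uses the BMI2 rorxl instruction (a three-operand rotate that does not
 * modify the flags) and addl2 uses leal as a flag-preserving add; presumably
 * this keeps the heavily interleaved round code free of extra flag-register
 * dependencies. */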

/* Round function macros. */

#define GG1(x, y, z, o, t) \
        movl x, o; \
        xorl y, o; \
        xorl z, o;

#define FF1(x, y, z, o, t) GG1(x, y, z, o, t)

#define GG2(x, y, z, o, t) \
        andnl z, x, o; \
        movl y, t; \
        andl x, t; \
        addl2(t, o);

#define FF2(x, y, z, o, t) \
        movl y, o; \
        xorl x, o; \
        movl y, t; \
        andl x, t; \
        andl z, o; \
        xorl t, o;
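
/* FF1/GG1 are the round 0-15 boolean function x ^ y ^ z.  FF2 is the majority
 * function (x & y) | (x & z) | (y & z), computed via the identity
 * ((x ^ y) & z) ^ (x & y).  GG2 is the choice function (x & y) | (~x & z),
 * computed as (~x & z) + (x & y) with andnl; the two terms have no set bits in
 * common, so the addition is equivalent to an OR. */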

#define R(i, a, b, c, d, e, f, g, h, round, widx, wtype) \
        /* rol(a, 12) => t0 */ \
        roll3mov(12, a, t0); /* rorxl here would reduce perf by 6% on zen3 */ \
        /* rol((t0 + e + k), 7) => t1 */ \
        leal K##round(t0, e, 1), t1; \
        roll2(7, t1); \
        /* h + w1 => h */ \
        addl wtype##_W1_ADDR(round, widx), h; \
        /* h + t1 => h */ \
        addl2(t1, h); \
        /* t1 ^ t0 => t0 */ \
        xorl t1, t0; \
        /* w1w2 + d => d */ \
        addl wtype##_W1W2_ADDR(round, widx), d; \
        /* FF##i(a,b,c) => t1 */ \
        FF##i(a, b, c, t1, t2); \
        /* d + t1 => d */ \
        addl2(t1, d); \
        /* GG##i(e,f,g) => t2 */ \
        GG##i(e, f, g, t2, t1); \
        /* h + t2 => h */ \
        addl2(t2, h); \
        /* rol(f, 19) => f */ \
        roll2(19, f); \
        /* d + t0 => d */ \
        addl2(t0, d); \
        /* rol(b, 9) => b */ \
        roll2(9, b); \
        /* P0(h) => h */ \
        roll3(9, h, t2); \
        roll3(17, h, t1); \
        xorl t2, h; \
        xorl t1, h;

#define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \
        R(1, a, b, c, d, e, f, g, h, round, widx, wtype)

#define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \
        R(2, a, b, c, d, e, f, g, h, round, widx, wtype)
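
/* One R() invocation is a full SM3 compression round (j = round):
 *   SS1 = ROTL32(ROTL32(a, 12) + e + K_j, 7)
 *   SS2 = SS1 ^ ROTL32(a, 12)
 *   d  += FF_j(a, b, c) + SS2 + (W_j ^ W_{j+4})   (becomes TT1)
 *   h  += GG_j(e, f, g) + SS1 + W_j;  h = P0(h)   (becomes P0(TT2))
 * with P0(x) = x ^ ROTL32(x, 9) ^ ROTL32(x, 17), while b and f are rotated by
 * 9 and 19 bits in place.  The remaining state renaming (A = TT1, E = P0(TT2),
 * and so on) is done by rotating the register arguments at the R1/R2 call
 * sites below rather than by moving data. */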

/* Input expansion macros. */

/* Byte-swapped input address. */
#define IW_W_ADDR(round, widx, offs) \
        (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp)

/* Expanded input address. */
#define XW_W_ADDR(round, widx, offs) \
        (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp)

/* Rounds 1-12, byte-swapped input block addresses. */
#define IW_W1_ADDR(round, widx)   IW_W_ADDR(round, widx, 0)
#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32)

/* Rounds 13-64, expanded input block addresses. */
#define XW_W1_ADDR(round, widx)   XW_W_ADDR(round, widx, 0)
#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32)
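
/* Each 64-byte stack slot holds 16 bytes of message words at offset 0 and the
 * corresponding W_j ^ W_{j+4} values at offset 32.  Rounds 0-3, 4-7 and 8-11
 * read the byte-swapped input block from the slots at offsets 0, 64 and 128,
 * while the expanded words for round groups 12-14, 15-17, 18-20, ... ping-pong
 * between the slots at offsets 0 and 64, overwriting words that have already
 * been consumed. */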

/* Input block loading. */
#define LOAD_W_XMM_1() \
        vmovdqu 0*16(RDATA), XTMP0; /* XTMP0: w3, w2, w1, w0 */ \
        vmovdqu 1*16(RDATA), XTMP1; /* XTMP1: w7, w6, w5, w4 */ \
        vmovdqu 2*16(RDATA), XTMP2; /* XTMP2: w11, w10, w9, w8 */ \
        vmovdqu 3*16(RDATA), XTMP3; /* XTMP3: w15, w14, w13, w12 */ \
        vpshufb BSWAP_REG, XTMP0, XTMP0; \
        vpshufb BSWAP_REG, XTMP1, XTMP1; \
        vpshufb BSWAP_REG, XTMP2, XTMP2; \
        vpshufb BSWAP_REG, XTMP3, XTMP3; \
        vpxor XTMP0, XTMP1, XTMP4; \
        vpxor XTMP1, XTMP2, XTMP5; \
        vpxor XTMP2, XTMP3, XTMP6; \
        leaq 64(RDATA), RDATA; \
        vmovdqa XTMP0, IW_W1_ADDR(0, 0); \
        vmovdqa XTMP4, IW_W1W2_ADDR(0, 0); \
        vmovdqa XTMP1, IW_W1_ADDR(4, 0); \
        vmovdqa XTMP5, IW_W1W2_ADDR(4, 0);

#define LOAD_W_XMM_2() \
        vmovdqa XTMP2, IW_W1_ADDR(8, 0); \
        vmovdqa XTMP6, IW_W1W2_ADDR(8, 0);

#define LOAD_W_XMM_3() \
        vpshufd $0b00000000, XTMP0, W0; /* W0: xx, w0, xx, xx */ \
        vpshufd $0b11111001, XTMP0, W1; /* W1: xx, w3, w2, w1 */ \
        vmovdqa XTMP1, W2;              /* W2: xx, w6, w5, w4 */ \
        vpalignr $12, XTMP1, XTMP2, W3; /* W3: xx, w9, w8, w7 */ \
        vpalignr $8, XTMP2, XTMP3, W4;  /* W4: xx, w12, w11, w10 */ \
        vpshufd $0b11111001, XTMP3, W5; /* W5: xx, w15, w14, w13 */

/* Message scheduling. Note: 3 words per XMM register. */
#define SCHED_W_0(round, w0, w1, w2, w3, w4, w5) \
        /* Load (w[i - 16]) => XTMP0 */ \
        vpshufd $0b10111111, w0, XTMP0; \
        vpalignr $12, XTMP0, w1, XTMP0; /* XTMP0: xx, w2, w1, w0 */ \
        /* Load (w[i - 13]) => XTMP1 */ \
        vpshufd $0b10111111, w1, XTMP1; \
        vpalignr $12, XTMP1, w2, XTMP1; \
        /* w[i - 9] == w3 */ \
        /* XMM3 ^ XTMP0 => XTMP0 */ \
        vpxor w3, XTMP0, XTMP0;

#define SCHED_W_1(round, w0, w1, w2, w3, w4, w5) \
        /* w[i - 3] == w5 */ \
        /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \
        vpslld $15, w5, XTMP2; \
        vpsrld $(32-15), w5, XTMP3; \
        vpxor XTMP2, XTMP3, XTMP3; \
        vpxor XTMP3, XTMP0, XTMP0; \
        /* rol(XTMP1, 7) => XTMP1 */ \
        vpslld $7, XTMP1, XTMP5; \
        vpsrld $(32-7), XTMP1, XTMP1; \
        vpxor XTMP5, XTMP1, XTMP1; \
        /* XMM4 ^ XTMP1 => XTMP1 */ \
        vpxor w4, XTMP1, XTMP1; \
        /* w[i - 6] == XMM4 */ \
        /* P1(XTMP0) ^ XTMP1 => XMM0 */ \
        vpslld $15, XTMP0, XTMP5; \
        vpsrld $(32-15), XTMP0, XTMP6; \
        vpslld $23, XTMP0, XTMP2; \
        vpsrld $(32-23), XTMP0, XTMP3; \
        vpxor XTMP0, XTMP1, XTMP1; \
        vpxor XTMP6, XTMP5, XTMP5; \
        vpxor XTMP3, XTMP2, XTMP2; \
        vpxor XTMP2, XTMP5, XTMP5; \
        vpxor XTMP5, XTMP1, w0;

#define SCHED_W_2(round, w0, w1, w2, w3, w4, w5) \
        /* W1 in XMM12 */ \
        vpshufd $0b10111111, w4, XTMP4; \
        vpalignr $12, XTMP4, w5, XTMP4; \
        vmovdqa XTMP4, XW_W1_ADDR((round), 0); \
        /* W1 ^ W2 => XTMP1 */ \
        vpxor w0, XTMP4, XTMP1; \
        vmovdqa XTMP1, XW_W1W2_ADDR((round), 0);
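
/* SCHED_W_0/1/2 together produce three new message words per invocation,
 * following the SM3 expansion
 *   W_i = P1(W_{i-16} ^ W_{i-9} ^ ROTL32(W_{i-3}, 15)) ^ ROTL32(W_{i-13}, 7) ^ W_{i-6}
 * with P1(x) = x ^ ROTL32(x, 15) ^ ROTL32(x, 23).  The rotates are emulated
 * with vpslld/vpsrld/vpxor pairs, since AVX provides no packed-rotate
 * instruction, and the new words plus the W ^ W' values are spilled to the XW
 * stack slots for the scalar rounds to read. */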

.text

/*
 * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
 *
 * unsigned int
 * _gcry_sm3_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data,
 *                                     size_t nblks)
 */
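
/*
 * A minimal C sketch of a caller, under the assumption that the context starts
 * with the eight 32-bit chaining values at the state_h* offsets above (the
 * struct name and helper below are illustrative, not the actual libgcrypt
 * code).  The return value (0 here, see the xorl %eax, %eax before the return)
 * reports the extra stack burn depth back to the caller.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   typedef struct { uint32_t h[8]; } sm3_state_t;   // matches state_h0..h7
 *
 *   extern unsigned int _gcry_sm3_transform_amd64_avx_bmi2
 *     (void *ctx, const unsigned char *data, size_t nblks);
 *
 *   static unsigned int sm3_compress_blocks(sm3_state_t *st,
 *                                           const unsigned char *data,
 *                                           size_t nblks)
 *   {
 *     // data must point at nblks (>= 1) complete 64-byte message blocks.
 *     return _gcry_sm3_transform_amd64_avx_bmi2(st, data, nblks);
 *   }
 */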

.globl _gcry_sm3_transform_amd64_avx_bmi2
ELF(.type _gcry_sm3_transform_amd64_avx_bmi2,@function)
.align 16
_gcry_sm3_transform_amd64_avx_bmi2:
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: data (64*nblks bytes)
         *      %rdx: nblks
         */
        CFI_STARTPROC();

        vzeroupper;

        pushq %rbp;
        CFI_PUSH(%rbp);
        movq %rsp, %rbp;
        CFI_DEF_CFA_REGISTER(%rbp);

        movq %rdx, RNBLKS;

        subq $STACK_SIZE, %rsp;
        andq $(~63), %rsp;

        movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp);
        CFI_REL_OFFSET(%rbx, STACK_REG_SAVE + 0 * 8);
        movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp);
        CFI_REL_OFFSET(%r15, STACK_REG_SAVE + 1 * 8);
        movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp);
        CFI_REL_OFFSET(%r14, STACK_REG_SAVE + 2 * 8);
        movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp);
        CFI_REL_OFFSET(%r13, STACK_REG_SAVE + 3 * 8);
        movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp);
        CFI_REL_OFFSET(%r12, STACK_REG_SAVE + 4 * 8);

        vmovdqa .Lbe32mask rRIP, BSWAP_REG;

        /* Get the values of the chaining variables. */
        movl state_h0(RSTATE), a;
        movl state_h1(RSTATE), b;
        movl state_h2(RSTATE), c;
        movl state_h3(RSTATE), d;
        movl state_h4(RSTATE), e;
        movl state_h5(RSTATE), f;
        movl state_h6(RSTATE), g;
        movl state_h7(RSTATE), h;

.align 16
.Loop:
        /* Load data part1. */
        LOAD_W_XMM_1();

        leaq -1(RNBLKS), RNBLKS;

        /* Transform 0-3 + Load data part2. */
        R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2();
        R1(d, a, b, c, h, e, f, g, 1, 1, IW);
        R1(c, d, a, b, g, h, e, f, 2, 2, IW);
        R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3();

        /* Transform 4-7 + Precalc 12-14. */
        R1(a, b, c, d, e, f, g, h, 4, 0, IW);
        R1(d, a, b, c, h, e, f, g, 5, 1, IW);
        R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5);
        R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5);

        /* Transform 8-11 + Precalc 12-17. */
        R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5);
        R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0);
        R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0);
        R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0);

        /* Transform 12-14 + Precalc 18-20 */
        R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1);
        R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1);
        R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1);

        /* Transform 15-17 + Precalc 21-23 */
        R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2);
        R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2);
        R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2);

        /* Transform 18-20 + Precalc 24-26 */
        R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3);
        R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3);
        R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3);

        /* Transform 21-23 + Precalc 27-29 */
        R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4);
        R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4);
        R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4);

        /* Transform 24-26 + Precalc 30-32 */
        R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5);
        R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5);
        R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5);

        /* Transform 27-29 + Precalc 33-35 */
        R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0);
        R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0);
        R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0);

        /* Transform 30-32 + Precalc 36-38 */
        R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1);
        R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1);
        R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1);

        /* Transform 33-35 + Precalc 39-41 */
        R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2);
        R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2);
        R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2);

        /* Transform 36-38 + Precalc 42-44 */
        R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3);
        R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3);
        R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, W4, W5, W0, W1, W2, W3);

        /* Transform 39-41 + Precalc 45-47 */
        R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4);
        R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4);
        R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4);

        /* Transform 42-44 + Precalc 48-50 */
        R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5);
        R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5);
        R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5);

        /* Transform 45-47 + Precalc 51-53 */
        R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0);
        R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0);
        R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, W1, W2, W3, W4, W5, W0);

        /* Transform 48-50 + Precalc 54-56 */
        R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1);
        R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1);
        R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1);

        /* Transform 51-53 + Precalc 57-59 */
        R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2);
        R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2);
        R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2);

        /* Transform 54-56 + Precalc 60-62 */
        R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3);
        R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3);
        R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3);

        /* Transform 57-59 + Precalc 63 */
        R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4);
        R2(c, d, a, b, g, h, e, f, 58, 1, XW);
        R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4);

        /* Transform 60-62 + Precalc 63 */
        R2(a, b, c, d, e, f, g, h, 60, 0, XW);
        R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4);
        R2(c, d, a, b, g, h, e, f, 62, 2, XW);

        /* Transform 63 */
        R2(b, c, d, a, f, g, h, e, 63, 0, XW);

        /* Update the chaining variables. */
        xorl state_h0(RSTATE), a;
        xorl state_h1(RSTATE), b;
        xorl state_h2(RSTATE), c;
        xorl state_h3(RSTATE), d;
        movl a, state_h0(RSTATE);
        movl b, state_h1(RSTATE);
        movl c, state_h2(RSTATE);
        movl d, state_h3(RSTATE);

        xorl state_h4(RSTATE), e;
        xorl state_h5(RSTATE), f;
        xorl state_h6(RSTATE), g;
        xorl state_h7(RSTATE), h;
        movl e, state_h4(RSTATE);
        movl f, state_h5(RSTATE);
        movl g, state_h6(RSTATE);
        movl h, state_h7(RSTATE);

        cmpq $0, RNBLKS;
        jne .Loop;

        vzeroall;

        movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx;
        CFI_RESTORE(%rbx);
        movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15;
        CFI_RESTORE(%r15);
        movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14;
        CFI_RESTORE(%r14);
        movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13;
        CFI_RESTORE(%r13);
        movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12;
        CFI_RESTORE(%r12);
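
        /* Burn the spilled message schedule: vzeroall above cleared %xmm0, and
         * these stores overwrite all three 64-byte W slots on the stack so no
         * input-derived data is left behind. */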
        vmovdqa %xmm0, IW_W1_ADDR(0, 0);
        vmovdqa %xmm0, IW_W1W2_ADDR(0, 0);
        vmovdqa %xmm0, IW_W1_ADDR(4, 0);
        vmovdqa %xmm0, IW_W1W2_ADDR(4, 0);
        vmovdqa %xmm0, IW_W1_ADDR(8, 0);
        vmovdqa %xmm0, IW_W1W2_ADDR(8, 0);

        xorl %eax, %eax; /* stack burned */

        leave;
        CFI_LEAVE();
        ret_spec_stop;
        CFI_ENDPROC();
ELF(.size _gcry_sm3_transform_amd64_avx_bmi2,
          .-_gcry_sm3_transform_amd64_avx_bmi2;)

#endif
#endif