/* sha256-ppc.c - PowerPC vcrypto implementation of SHA-256 transform
 * Copyright (C) 2019,2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <config.h>

#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
    defined(USE_SHA256) && \
    __GNUC__ >= 4

#include <altivec.h>
#include "bufhelp.h"

typedef vector unsigned char vector16x_u8;
typedef vector unsigned int vector4x_u32;
typedef vector unsigned long long vector2x_u64;

#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NO_INLINE __attribute__((noinline))
#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))

#define ASM_FUNC_ATTR          NO_INSTRUMENT_FUNCTION
#define ASM_FUNC_ATTR_INLINE   ASM_FUNC_ATTR ALWAYS_INLINE
#define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE

#ifdef HAVE_GCC_ATTRIBUTE_OPTIMIZE
# define FUNC_ATTR_OPT_O2 __attribute__((optimize("-O2")))
#else
# define FUNC_ATTR_OPT_O2
#endif

#if defined(__clang__) && defined(HAVE_CLANG_ATTRIBUTE_PPC_TARGET)
# define FUNC_ATTR_TARGET_P8 __attribute__((target("arch=pwr8")))
# define FUNC_ATTR_TARGET_P9 __attribute__((target("arch=pwr9")))
#elif defined(HAVE_GCC_ATTRIBUTE_PPC_TARGET)
# define FUNC_ATTR_TARGET_P8 __attribute__((target("cpu=power8")))
# define FUNC_ATTR_TARGET_P9 __attribute__((target("cpu=power9")))
#else
# define FUNC_ATTR_TARGET_P8
# define FUNC_ATTR_TARGET_P9
#endif
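
/* SHA-256 round constants, packed four 32-bit words per vector. */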
static const vector4x_u32 K[64 / 4] =
  {
#define TBL(v) v
    { TBL(0x428a2f98), TBL(0x71374491), TBL(0xb5c0fbcf), TBL(0xe9b5dba5) },
    { TBL(0x3956c25b), TBL(0x59f111f1), TBL(0x923f82a4), TBL(0xab1c5ed5) },
    { TBL(0xd807aa98), TBL(0x12835b01), TBL(0x243185be), TBL(0x550c7dc3) },
    { TBL(0x72be5d74), TBL(0x80deb1fe), TBL(0x9bdc06a7), TBL(0xc19bf174) },
    { TBL(0xe49b69c1), TBL(0xefbe4786), TBL(0x0fc19dc6), TBL(0x240ca1cc) },
    { TBL(0x2de92c6f), TBL(0x4a7484aa), TBL(0x5cb0a9dc), TBL(0x76f988da) },
    { TBL(0x983e5152), TBL(0xa831c66d), TBL(0xb00327c8), TBL(0xbf597fc7) },
    { TBL(0xc6e00bf3), TBL(0xd5a79147), TBL(0x06ca6351), TBL(0x14292967) },
    { TBL(0x27b70a85), TBL(0x2e1b2138), TBL(0x4d2c6dfc), TBL(0x53380d13) },
    { TBL(0x650a7354), TBL(0x766a0abb), TBL(0x81c2c92e), TBL(0x92722c85) },
    { TBL(0xa2bfe8a1), TBL(0xa81a664b), TBL(0xc24b8b70), TBL(0xc76c51a3) },
    { TBL(0xd192e819), TBL(0xd6990624), TBL(0xf40e3585), TBL(0x106aa070) },
    { TBL(0x19a4c116), TBL(0x1e376c08), TBL(0x2748774c), TBL(0x34b0bcb5) },
    { TBL(0x391c0cb3), TBL(0x4ed8aa4a), TBL(0x5b9cca4f), TBL(0x682e6ff3) },
    { TBL(0x748f82ee), TBL(0x78a5636f), TBL(0x84c87814), TBL(0x8cc70208) },
    { TBL(0x90befffa), TBL(0xa4506ceb), TBL(0xbef9a3f7), TBL(0xc67178f2) }
#undef TBL
  };
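
/* Rotate the four 32-bit elements of 'v' left by 'idx' positions in logical
 * element order, so that element 'idx' ends up in element 0. */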
static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_rol_elems(vector4x_u32 v, unsigned int idx)
{
#ifndef WORDS_BIGENDIAN
  return vec_sld (v, v, (16 - (4 * idx)) & 15);
#else
  return vec_sld (v, v, (4 * idx) & 15);
#endif
}
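
/* Gather the index-0 elements of the four input vectors into one vector. */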
static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_merge_idx0_elems(vector4x_u32 v0, vector4x_u32 v1,
                     vector4x_u32 v2, vector4x_u32 v3)
{
  return (vector4x_u32)vec_mergeh ((vector2x_u64) vec_mergeh(v0, v1),
                                   (vector2x_u64) vec_mergeh(v2, v3));
}
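
/* SHA-2 sigma functions via the POWER8 'vshasigmaw' instruction; 'a' selects
 * lower-case/upper-case sigma and 'b' is the per-word function-select mask. */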
static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_vshasigma_u32(vector4x_u32 v, unsigned int a, unsigned int b)
{
  asm ("vshasigmaw %0,%1,%2,%3"
       : "=v" (v)
       : "v" (v), "g" (a), "g" (b)
       : "memory");
  return v;
}
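
/* Element-wise 32-bit modular addition via the 'vadduwm' instruction. */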
static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_add_u32(vector4x_u32 v, vector4x_u32 w)
{
  __asm__ ("vadduwm %0,%1,%2"
           : "=v" (v)
           : "v" (v), "v" (w)
           : "memory");
  return v;
}
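
/* Load 16 bytes from ptr + offset as four big-endian 32-bit words. */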
static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_u32_load_be(unsigned long offset, const void *ptr)
{
  vector4x_u32 vecu32;
#if __GNUC__ >= 4
  if (__builtin_constant_p (offset) && offset == 0)
    __asm__ volatile ("lxvw4x %x0,0,%1\n\t"
                      : "=wa" (vecu32)
                      : "r" ((uintptr_t)ptr)
                      : "memory");
  else
#endif
    __asm__ volatile ("lxvw4x %x0,%1,%2\n\t"
                      : "=wa" (vecu32)
                      : "r" (offset), "r" ((uintptr_t)ptr)
                      : "memory", "r0");
#ifndef WORDS_BIGENDIAN
  return (vector4x_u32)vec_reve((vector16x_u8)vecu32);
#else
  return vecu32;
#endif
}
/* SHA2 round in vector registers */
#define R(a,b,c,d,e,f,g,h,ki,w) do \
  { \
    t1 = vec_add_u32((h), (w)); \
    t2 = Cho((e),(f),(g)); \
    t1 = vec_add_u32(t1, GETK(ki)); \
    t1 = vec_add_u32(t1, t2); \
    t1 = Sum1add(t1, e); \
    t2 = Maj((a),(b),(c)); \
    t2 = Sum0add(t2, a); \
    h = vec_add_u32(t1, t2); \
    d += t1; \
  } while (0)
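
/* Fetch the round constant for round 'kidx'; a new vector of four constants
 * is loaded from K every fourth round and rotated into place in between. */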
#define GETK(kidx) \
  ({ \
    vector4x_u32 rk; \
    if (((kidx) % 4) == 0) \
      { \
        rk = ktmp = *(kptr++); \
        if ((kidx) < 63) \
          asm volatile("" : "+r" (kptr) :: "memory"); \
      } \
    else if (((kidx) % 4) == 1) \
      { \
        rk = vec_mergeo(ktmp, ktmp); \
      } \
    else \
      { \
        rk = vec_rol_elems(ktmp, ((kidx) % 4)); \
      } \
    rk; \
  })
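
/* Choice, majority and the four SHA-256 sigma functions, plus helpers that
 * fuse a sigma with an addition. */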
#define Cho(b, c, d)  (vec_sel(d, c, b))
#define Maj(c, d, b)  (vec_sel(c, b, c ^ d))
#define Sum0(x)       (vec_vshasigma_u32(x, 1, 0))
#define Sum1(x)       (vec_vshasigma_u32(x, 1, 15))
#define S0(x)         (vec_vshasigma_u32(x, 0, 0))
#define S1(x)         (vec_vshasigma_u32(x, 0, 15))
#define Xadd(X, d, x) vec_add_u32(d, X(x))
#define Sum0add(d, x) Xadd(Sum0, d, x)
#define Sum1add(d, x) Xadd(Sum1, d, x)
#define S0add(d, x)   Xadd(S0, d, x)
#define S1add(d, x)   Xadd(S1, d, x)
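
/* Message loading and schedule for the single-block tail: I(i) loads the
 * input words, W(i) returns a word and expands the schedule, L(i) reads the
 * last sixteen expanded words for rounds 48..63. */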
#define I(i) \
  ({ \
    if (((i) % 4) == 0) \
      { \
        w[i] = vec_u32_load_be(0, data); \
        data += 4 * 4; \
        if ((i) / 4 < 3) \
          asm volatile("" : "+r"(data) :: "memory"); \
      } \
    else if (((i) % 4) == 1) \
      { \
        w[i] = vec_mergeo(w[(i) - 1], w[(i) - 1]); \
      } \
    else \
      { \
        w[i] = vec_rol_elems(w[(i) - (i) % 4], (i)); \
      } \
  })

#define WN(i) ({ w[(i)&0x0f] += w[((i)-7) &0x0f]; \
                 w[(i)&0x0f] = S0add(w[(i)&0x0f], w[((i)-15)&0x0f]); \
                 w[(i)&0x0f] = S1add(w[(i)&0x0f], w[((i)-2) &0x0f]); })

#define W(i) ({ vector4x_u32 r = w[(i)&0x0f]; WN(i); r; })

#define L(i) w[(i)&0x0f]
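
/* Two-block variants: I2(i) loads words from both blocks interleaved so the
 * schedule expansion computes both blocks at once; W2(i)/L2(i) feed the first
 * block's rounds while stashing the second block's words in w2[], and WL(i)
 * replays them for the second block's rounds. */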
#define I2(i) \
  ({ \
    if ((i) % 4 == 0) \
      { \
        vector4x_u32 iw = vec_u32_load_be(0, data); \
        vector4x_u32 iw2 = vec_u32_load_be(64, data); \
        if ((i) / 4 < 3) \
          { \
            data += 4 * 4; \
            asm volatile("" : "+r"(data) :: "memory"); \
          } \
        else \
          { \
            data += 4 * 4 + 64; \
            asm volatile("" : "+r"(data) :: "memory"); \
          } \
        w[(i) + 0] = vec_mergeh(iw, iw2); \
        w[(i) + 1] = vec_rol_elems(w[(i) + 0], 2); \
        w[(i) + 2] = vec_mergel(iw, iw2); \
        w[(i) + 3] = vec_rol_elems(w[(i) + 2], 2); \
      } \
  })

#define W2(i) \
  ({ \
    vector4x_u32 wt1 = w[(i)&0x0f]; \
    WN(i); \
    w2[(i) / 2] = (((i) % 2) == 0) ? wt1 : vec_mergeo(w2[(i) / 2], wt1); \
    wt1; \
  })

#define L2(i) \
  ({ \
    vector4x_u32 lt1 = w[(i)&0x0f]; \
    w2[(i) / 2] = (((i) % 2) == 0) ? lt1 : vec_mergeo(w2[(i) / 2], lt1); \
    lt1; \
  })

#define WL(i) \
  ({ \
    vector4x_u32 wlt1 = w2[(i) / 2]; \
    if (((i) % 2) == 0 && (i) < 63) \
      w2[(i) / 2] = vec_mergeo(wlt1, wlt1); \
    wlt1; \
  })
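
/* Transform 'nblks' 64-byte input blocks; the main loop handles two blocks
 * per iteration.  The return value is the stack burn depth. */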
static ASM_FUNC_ATTR_INLINE FUNC_ATTR_OPT_O2 unsigned int
sha256_transform_ppc(u32 state[8], const unsigned char *data, size_t nblks)
{
  vector4x_u32 h0, h1, h2, h3, h4, h5, h6, h7;
  vector4x_u32 h0_h3, h4_h7;
  vector4x_u32 a, b, c, d, e, f, g, h, t1, t2;
  vector4x_u32 w[16];
  vector4x_u32 w2[64 / 2];

  h0_h3 = vec_vsx_ld (4 * 0, state);
  h4_h7 = vec_vsx_ld (4 * 4, state);

  h0 = h0_h3;
  h1 = vec_mergeo (h0_h3, h0_h3);
  h2 = vec_rol_elems (h0_h3, 2);
  h3 = vec_rol_elems (h0_h3, 3);
  h4 = h4_h7;
  h5 = vec_mergeo (h4_h7, h4_h7);
  h6 = vec_rol_elems (h4_h7, 2);
  h7 = vec_rol_elems (h4_h7, 3);

  while (nblks >= 2)
    {
      const vector4x_u32 *kptr = K;
      vector4x_u32 ktmp;

      a = h0;
      b = h1;
      c = h2;
      d = h3;
      e = h4;
      f = h5;
      g = h6;
      h = h7;

      I2(0); I2(1); I2(2); I2(3);
      I2(4); I2(5); I2(6); I2(7);
      I2(8); I2(9); I2(10); I2(11);
      I2(12); I2(13); I2(14); I2(15);

      R(a, b, c, d, e, f, g, h, 0, W2(0));
      R(h, a, b, c, d, e, f, g, 1, W2(1));
      R(g, h, a, b, c, d, e, f, 2, W2(2));
      R(f, g, h, a, b, c, d, e, 3, W2(3));
      R(e, f, g, h, a, b, c, d, 4, W2(4));
      R(d, e, f, g, h, a, b, c, 5, W2(5));
      R(c, d, e, f, g, h, a, b, 6, W2(6));
      R(b, c, d, e, f, g, h, a, 7, W2(7));
      R(a, b, c, d, e, f, g, h, 8, W2(8));
      R(h, a, b, c, d, e, f, g, 9, W2(9));
      R(g, h, a, b, c, d, e, f, 10, W2(10));
      R(f, g, h, a, b, c, d, e, 11, W2(11));
      R(e, f, g, h, a, b, c, d, 12, W2(12));
      R(d, e, f, g, h, a, b, c, 13, W2(13));
      R(c, d, e, f, g, h, a, b, 14, W2(14));
      R(b, c, d, e, f, g, h, a, 15, W2(15));
      R(a, b, c, d, e, f, g, h, 16, W2(16));
      R(h, a, b, c, d, e, f, g, 17, W2(17));
      R(g, h, a, b, c, d, e, f, 18, W2(18));
      R(f, g, h, a, b, c, d, e, 19, W2(19));
      R(e, f, g, h, a, b, c, d, 20, W2(20));
      R(d, e, f, g, h, a, b, c, 21, W2(21));
      R(c, d, e, f, g, h, a, b, 22, W2(22));
      R(b, c, d, e, f, g, h, a, 23, W2(23));
      R(a, b, c, d, e, f, g, h, 24, W2(24));
      R(h, a, b, c, d, e, f, g, 25, W2(25));
      R(g, h, a, b, c, d, e, f, 26, W2(26));
      R(f, g, h, a, b, c, d, e, 27, W2(27));
      R(e, f, g, h, a, b, c, d, 28, W2(28));
      R(d, e, f, g, h, a, b, c, 29, W2(29));
      R(c, d, e, f, g, h, a, b, 30, W2(30));
      R(b, c, d, e, f, g, h, a, 31, W2(31));
      R(a, b, c, d, e, f, g, h, 32, W2(32));
      R(h, a, b, c, d, e, f, g, 33, W2(33));
      R(g, h, a, b, c, d, e, f, 34, W2(34));
      R(f, g, h, a, b, c, d, e, 35, W2(35));
      R(e, f, g, h, a, b, c, d, 36, W2(36));
      R(d, e, f, g, h, a, b, c, 37, W2(37));
      R(c, d, e, f, g, h, a, b, 38, W2(38));
      R(b, c, d, e, f, g, h, a, 39, W2(39));
      R(a, b, c, d, e, f, g, h, 40, W2(40));
      R(h, a, b, c, d, e, f, g, 41, W2(41));
      R(g, h, a, b, c, d, e, f, 42, W2(42));
      R(f, g, h, a, b, c, d, e, 43, W2(43));
      R(e, f, g, h, a, b, c, d, 44, W2(44));
      R(d, e, f, g, h, a, b, c, 45, W2(45));
      R(c, d, e, f, g, h, a, b, 46, W2(46));
      R(b, c, d, e, f, g, h, a, 47, W2(47));
      R(a, b, c, d, e, f, g, h, 48, L2(48));
      R(h, a, b, c, d, e, f, g, 49, L2(49));
      R(g, h, a, b, c, d, e, f, 50, L2(50));
      R(f, g, h, a, b, c, d, e, 51, L2(51));
      R(e, f, g, h, a, b, c, d, 52, L2(52));
      R(d, e, f, g, h, a, b, c, 53, L2(53));
      R(c, d, e, f, g, h, a, b, 54, L2(54));
      R(b, c, d, e, f, g, h, a, 55, L2(55));
      R(a, b, c, d, e, f, g, h, 56, L2(56));
      R(h, a, b, c, d, e, f, g, 57, L2(57));
      R(g, h, a, b, c, d, e, f, 58, L2(58));
      R(f, g, h, a, b, c, d, e, 59, L2(59));
      R(e, f, g, h, a, b, c, d, 60, L2(60));
      R(d, e, f, g, h, a, b, c, 61, L2(61));
      R(c, d, e, f, g, h, a, b, 62, L2(62));
      R(b, c, d, e, f, g, h, a, 63, L2(63));

      h0 += a;
      h1 += b;
      h2 += c;
      h3 += d;
      h4 += e;
      h5 += f;
      h6 += g;
      h7 += h;

      kptr = K;

      a = h0;
      b = h1;
      c = h2;
      d = h3;
      e = h4;
      f = h5;
      g = h6;
      h = h7;

      R(a, b, c, d, e, f, g, h, 0, WL(0));
      R(h, a, b, c, d, e, f, g, 1, WL(1));
      R(g, h, a, b, c, d, e, f, 2, WL(2));
      R(f, g, h, a, b, c, d, e, 3, WL(3));
      R(e, f, g, h, a, b, c, d, 4, WL(4));
      R(d, e, f, g, h, a, b, c, 5, WL(5));
      R(c, d, e, f, g, h, a, b, 6, WL(6));
      R(b, c, d, e, f, g, h, a, 7, WL(7));
      R(a, b, c, d, e, f, g, h, 8, WL(8));
      R(h, a, b, c, d, e, f, g, 9, WL(9));
      R(g, h, a, b, c, d, e, f, 10, WL(10));
      R(f, g, h, a, b, c, d, e, 11, WL(11));
      R(e, f, g, h, a, b, c, d, 12, WL(12));
      R(d, e, f, g, h, a, b, c, 13, WL(13));
      R(c, d, e, f, g, h, a, b, 14, WL(14));
      R(b, c, d, e, f, g, h, a, 15, WL(15));
      R(a, b, c, d, e, f, g, h, 16, WL(16));
      R(h, a, b, c, d, e, f, g, 17, WL(17));
      R(g, h, a, b, c, d, e, f, 18, WL(18));
      R(f, g, h, a, b, c, d, e, 19, WL(19));
      R(e, f, g, h, a, b, c, d, 20, WL(20));
      R(d, e, f, g, h, a, b, c, 21, WL(21));
      R(c, d, e, f, g, h, a, b, 22, WL(22));
      R(b, c, d, e, f, g, h, a, 23, WL(23));
      R(a, b, c, d, e, f, g, h, 24, WL(24));
      R(h, a, b, c, d, e, f, g, 25, WL(25));
      R(g, h, a, b, c, d, e, f, 26, WL(26));
      R(f, g, h, a, b, c, d, e, 27, WL(27));
      R(e, f, g, h, a, b, c, d, 28, WL(28));
      R(d, e, f, g, h, a, b, c, 29, WL(29));
      R(c, d, e, f, g, h, a, b, 30, WL(30));
      R(b, c, d, e, f, g, h, a, 31, WL(31));
      R(a, b, c, d, e, f, g, h, 32, WL(32));
      R(h, a, b, c, d, e, f, g, 33, WL(33));
      R(g, h, a, b, c, d, e, f, 34, WL(34));
      R(f, g, h, a, b, c, d, e, 35, WL(35));
      R(e, f, g, h, a, b, c, d, 36, WL(36));
      R(d, e, f, g, h, a, b, c, 37, WL(37));
      R(c, d, e, f, g, h, a, b, 38, WL(38));
      R(b, c, d, e, f, g, h, a, 39, WL(39));
      R(a, b, c, d, e, f, g, h, 40, WL(40));
      R(h, a, b, c, d, e, f, g, 41, WL(41));
      R(g, h, a, b, c, d, e, f, 42, WL(42));
      R(f, g, h, a, b, c, d, e, 43, WL(43));
      R(e, f, g, h, a, b, c, d, 44, WL(44));
      R(d, e, f, g, h, a, b, c, 45, WL(45));
      R(c, d, e, f, g, h, a, b, 46, WL(46));
      R(b, c, d, e, f, g, h, a, 47, WL(47));
      R(a, b, c, d, e, f, g, h, 48, WL(48));
      R(h, a, b, c, d, e, f, g, 49, WL(49));
      R(g, h, a, b, c, d, e, f, 50, WL(50));
      R(f, g, h, a, b, c, d, e, 51, WL(51));
      R(e, f, g, h, a, b, c, d, 52, WL(52));
      R(d, e, f, g, h, a, b, c, 53, WL(53));
      R(c, d, e, f, g, h, a, b, 54, WL(54));
      R(b, c, d, e, f, g, h, a, 55, WL(55));
      R(a, b, c, d, e, f, g, h, 56, WL(56));
      R(h, a, b, c, d, e, f, g, 57, WL(57));
      R(g, h, a, b, c, d, e, f, 58, WL(58));
      R(f, g, h, a, b, c, d, e, 59, WL(59));
      R(e, f, g, h, a, b, c, d, 60, WL(60));
      R(d, e, f, g, h, a, b, c, 61, WL(61));
      R(c, d, e, f, g, h, a, b, 62, WL(62));
      R(b, c, d, e, f, g, h, a, 63, WL(63));

      h0 += a;
      h1 += b;
      h2 += c;
      h3 += d;
      h4 += e;
      h5 += f;
      h6 += g;
      h7 += h;

      nblks -= 2;
    }

  if (nblks)
    {
      const vector4x_u32 *kptr = K;
      vector4x_u32 ktmp;

      a = h0;
      b = h1;
      c = h2;
      d = h3;
      e = h4;
      f = h5;
      g = h6;
      h = h7;

      I(0); I(1); I(2); I(3);
      I(4); I(5); I(6); I(7);
      I(8); I(9); I(10); I(11);
      I(12); I(13); I(14); I(15);

      R(a, b, c, d, e, f, g, h, 0, W(0));
      R(h, a, b, c, d, e, f, g, 1, W(1));
      R(g, h, a, b, c, d, e, f, 2, W(2));
      R(f, g, h, a, b, c, d, e, 3, W(3));
      R(e, f, g, h, a, b, c, d, 4, W(4));
      R(d, e, f, g, h, a, b, c, 5, W(5));
      R(c, d, e, f, g, h, a, b, 6, W(6));
      R(b, c, d, e, f, g, h, a, 7, W(7));
      R(a, b, c, d, e, f, g, h, 8, W(8));
      R(h, a, b, c, d, e, f, g, 9, W(9));
      R(g, h, a, b, c, d, e, f, 10, W(10));
      R(f, g, h, a, b, c, d, e, 11, W(11));
      R(e, f, g, h, a, b, c, d, 12, W(12));
      R(d, e, f, g, h, a, b, c, 13, W(13));
      R(c, d, e, f, g, h, a, b, 14, W(14));
      R(b, c, d, e, f, g, h, a, 15, W(15));
      R(a, b, c, d, e, f, g, h, 16, W(16));
      R(h, a, b, c, d, e, f, g, 17, W(17));
      R(g, h, a, b, c, d, e, f, 18, W(18));
      R(f, g, h, a, b, c, d, e, 19, W(19));
      R(e, f, g, h, a, b, c, d, 20, W(20));
      R(d, e, f, g, h, a, b, c, 21, W(21));
      R(c, d, e, f, g, h, a, b, 22, W(22));
      R(b, c, d, e, f, g, h, a, 23, W(23));
      R(a, b, c, d, e, f, g, h, 24, W(24));
      R(h, a, b, c, d, e, f, g, 25, W(25));
      R(g, h, a, b, c, d, e, f, 26, W(26));
      R(f, g, h, a, b, c, d, e, 27, W(27));
      R(e, f, g, h, a, b, c, d, 28, W(28));
      R(d, e, f, g, h, a, b, c, 29, W(29));
      R(c, d, e, f, g, h, a, b, 30, W(30));
      R(b, c, d, e, f, g, h, a, 31, W(31));
      R(a, b, c, d, e, f, g, h, 32, W(32));
      R(h, a, b, c, d, e, f, g, 33, W(33));
      R(g, h, a, b, c, d, e, f, 34, W(34));
      R(f, g, h, a, b, c, d, e, 35, W(35));
      R(e, f, g, h, a, b, c, d, 36, W(36));
      R(d, e, f, g, h, a, b, c, 37, W(37));
      R(c, d, e, f, g, h, a, b, 38, W(38));
      R(b, c, d, e, f, g, h, a, 39, W(39));
      R(a, b, c, d, e, f, g, h, 40, W(40));
      R(h, a, b, c, d, e, f, g, 41, W(41));
      R(g, h, a, b, c, d, e, f, 42, W(42));
      R(f, g, h, a, b, c, d, e, 43, W(43));
      R(e, f, g, h, a, b, c, d, 44, W(44));
      R(d, e, f, g, h, a, b, c, 45, W(45));
      R(c, d, e, f, g, h, a, b, 46, W(46));
      R(b, c, d, e, f, g, h, a, 47, W(47));
      R(a, b, c, d, e, f, g, h, 48, L(48));
      R(h, a, b, c, d, e, f, g, 49, L(49));
      R(g, h, a, b, c, d, e, f, 50, L(50));
      R(f, g, h, a, b, c, d, e, 51, L(51));
      R(e, f, g, h, a, b, c, d, 52, L(52));
      R(d, e, f, g, h, a, b, c, 53, L(53));
      R(c, d, e, f, g, h, a, b, 54, L(54));
      R(b, c, d, e, f, g, h, a, 55, L(55));
      R(a, b, c, d, e, f, g, h, 56, L(56));
      R(h, a, b, c, d, e, f, g, 57, L(57));
      R(g, h, a, b, c, d, e, f, 58, L(58));
      R(f, g, h, a, b, c, d, e, 59, L(59));
      R(e, f, g, h, a, b, c, d, 60, L(60));
      R(d, e, f, g, h, a, b, c, 61, L(61));
      R(c, d, e, f, g, h, a, b, 62, L(62));
      R(b, c, d, e, f, g, h, a, 63, L(63));

      h0 += a;
      h1 += b;
      h2 += c;
      h3 += d;
      h4 += e;
      h5 += f;
      h6 += g;
      h7 += h;

      nblks--;
    }

  h0_h3 = vec_merge_idx0_elems (h0, h1, h2, h3);
  h4_h7 = vec_merge_idx0_elems (h4, h5, h6, h7);
  vec_vsx_st (h0_h3, 4 * 0, state);
  vec_vsx_st (h4_h7, 4 * 4, state);

  return sizeof(w2) + sizeof(w);
}
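
/* Entry points compiled for POWER8 and POWER9; both forward to the shared
 * inline transform above. */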
unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P8 FUNC_ATTR_OPT_O2
_gcry_sha256_transform_ppc8(u32 state[8], const unsigned char *data,
                            size_t nblks)
{
  return sha256_transform_ppc(state, data, nblks);
}

unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2
_gcry_sha256_transform_ppc9(u32 state[8], const unsigned char *data,
                            size_t nblks)
{
  return sha256_transform_ppc(state, data, nblks);
}

#endif /* ENABLE_PPC_CRYPTO_SUPPORT */