/* sha1-spe-asm.S */
  1. /*
  2. * Fast SHA-1 implementation for SPE instruction set (PPC)
  3. *
  4. * This code makes use of the SPE SIMD instruction set as defined in
  5. * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf
  6. * Implementation is based on optimization guide notes from
  7. * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf
  8. *
  9. * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
  10. *
  11. * This program is free software; you can redistribute it and/or modify it
  12. * under the terms of the GNU General Public License as published by the Free
  13. * Software Foundation; either version 2 of the License, or (at your option)
  14. * any later version.
  15. *
  16. */
  17. #include <asm/ppc_asm.h>
  18. #include <asm/asm-offsets.h>
  19. #define rHP r3 /* pointer to hash value */
  20. #define rWP r4 /* pointer to input */
  21. #define rKP r5 /* pointer to constants */
  22. #define rW0 r14 /* 64 bit round words */
  23. #define rW1 r15
  24. #define rW2 r16
  25. #define rW3 r17
  26. #define rW4 r18
  27. #define rW5 r19
  28. #define rW6 r20
  29. #define rW7 r21
  30. #define rH0 r6 /* 32 bit hash values */
  31. #define rH1 r7
  32. #define rH2 r8
  33. #define rH3 r9
  34. #define rH4 r10
  35. #define rT0 r22 /* 64 bit temporary */
  36. #define rT1 r0 /* 32 bit temporaries */
  37. #define rT2 r11
  38. #define rT3 r12
  39. #define rK r23 /* 64 bit constant in volatile register */
  40. #define LOAD_K01
  41. #define LOAD_K11 \
  42. evlwwsplat rK,0(rKP);
  43. #define LOAD_K21 \
  44. evlwwsplat rK,4(rKP);
  45. #define LOAD_K31 \
  46. evlwwsplat rK,8(rKP);
  47. #define LOAD_K41 \
  48. evlwwsplat rK,12(rKP);
  49. #define INITIALIZE \
  50. stwu r1,-128(r1); /* create stack frame */ \
  51. evstdw r14,8(r1); /* We must save non volatile */ \
  52. evstdw r15,16(r1); /* registers. Take the chance */ \
  53. evstdw r16,24(r1); /* and save the SPE part too */ \
  54. evstdw r17,32(r1); \
  55. evstdw r18,40(r1); \
  56. evstdw r19,48(r1); \
  57. evstdw r20,56(r1); \
  58. evstdw r21,64(r1); \
  59. evstdw r22,72(r1); \
  60. evstdw r23,80(r1);
  61. #define FINALIZE \
  62. evldw r14,8(r1); /* restore SPE registers */ \
  63. evldw r15,16(r1); \
  64. evldw r16,24(r1); \
  65. evldw r17,32(r1); \
  66. evldw r18,40(r1); \
  67. evldw r19,48(r1); \
  68. evldw r20,56(r1); \
  69. evldw r21,64(r1); \
  70. evldw r22,72(r1); \
  71. evldw r23,80(r1); \
  72. xor r0,r0,r0; \
  73. stw r0,8(r1); /* Delete sensitive data */ \
  74. stw r0,16(r1); /* that we might have pushed */ \
  75. stw r0,24(r1); /* from other context that runs */ \
  76. stw r0,32(r1); /* the same code. Assume that */ \
  77. stw r0,40(r1); /* the lower part of the GPRs */ \
  78. stw r0,48(r1); /* were already overwritten on */ \
  79. stw r0,56(r1); /* the way down to here */ \
  80. stw r0,64(r1); \
  81. stw r0,72(r1); \
  82. stw r0,80(r1); \
  83. addi r1,r1,128; /* cleanup stack frame */
  84. #ifdef __BIG_ENDIAN__
  85. #define LOAD_DATA(reg, off) \
  86. lwz reg,off(rWP); /* load data */
  87. #define NEXT_BLOCK \
  88. addi rWP,rWP,64; /* increment per block */
  89. #else
  90. #define LOAD_DATA(reg, off) \
  91. lwbrx reg,0,rWP; /* load data */ \
  92. addi rWP,rWP,4; /* increment per word */
  93. #define NEXT_BLOCK /* nothing to do */
  94. #endif
  95. #define R_00_15(a, b, c, d, e, w0, w1, k, off) \
  96. LOAD_DATA(w0, off) /* 1: W */ \
  97. and rT2,b,c; /* 1: F' = B and C */ \
  98. LOAD_K##k##1 \
  99. andc rT1,d,b; /* 1: F" = ~B and D */ \
  100. rotrwi rT0,a,27; /* 1: A' = A rotl 5 */ \
  101. or rT2,rT2,rT1; /* 1: F = F' or F" */ \
  102. add e,e,rT0; /* 1: E = E + A' */ \
  103. rotrwi b,b,2; /* 1: B = B rotl 30 */ \
  104. add e,e,w0; /* 1: E = E + W */ \
  105. LOAD_DATA(w1, off+4) /* 2: W */ \
  106. add e,e,rT2; /* 1: E = E + F */ \
  107. and rT1,a,b; /* 2: F' = B and C */ \
  108. add e,e,rK; /* 1: E = E + K */ \
  109. andc rT2,c,a; /* 2: F" = ~B and D */ \
  110. add d,d,rK; /* 2: E = E + K */ \
  111. or rT2,rT2,rT1; /* 2: F = F' or F" */ \
  112. rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
  113. add d,d,w1; /* 2: E = E + W */ \
  114. rotrwi a,a,2; /* 2: B = B rotl 30 */ \
  115. add d,d,rT0; /* 2: E = E + A' */ \
  116. evmergelo w1,w1,w0; /* mix W[0]/W[1] */ \
  117. add d,d,rT2 /* 2: E = E + F */
  118. #define R_16_19(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
  119. and rT2,b,c; /* 1: F' = B and C */ \
  120. evmergelohi rT0,w7,w6; /* W[-3] */ \
  121. andc rT1,d,b; /* 1: F" = ~B and D */ \
  122. evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \
  123. or rT1,rT1,rT2; /* 1: F = F' or F" */ \
  124. evxor w0,w0,w4; /* W = W xor W[-8] */ \
  125. add e,e,rT1; /* 1: E = E + F */ \
  126. evxor w0,w0,w1; /* W = W xor W[-14] */ \
  127. rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \
  128. evrlwi w0,w0,1; /* W = W rotl 1 */ \
  129. add e,e,rT2; /* 1: E = E + A' */ \
  130. evaddw rT0,w0,rK; /* WK = W + K */ \
  131. rotrwi b,b,2; /* 1: B = B rotl 30 */ \
  132. LOAD_K##k##1 \
  133. evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \
  134. add e,e,rT0; /* 1: E = E + WK */ \
  135. add d,d,rT1; /* 2: E = E + WK */ \
  136. and rT2,a,b; /* 2: F' = B and C */ \
  137. andc rT1,c,a; /* 2: F" = ~B and D */ \
  138. rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
  139. or rT1,rT1,rT2; /* 2: F = F' or F" */ \
  140. add d,d,rT0; /* 2: E = E + A' */ \
  141. rotrwi a,a,2; /* 2: B = B rotl 30 */ \
  142. add d,d,rT1 /* 2: E = E + F */
  143. #define R_20_39(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
  144. evmergelohi rT0,w7,w6; /* W[-3] */ \
  145. xor rT2,b,c; /* 1: F' = B xor C */ \
  146. evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \
  147. xor rT2,rT2,d; /* 1: F = F' xor D */ \
  148. evxor w0,w0,w4; /* W = W xor W[-8] */ \
  149. add e,e,rT2; /* 1: E = E + F */ \
  150. evxor w0,w0,w1; /* W = W xor W[-14] */ \
  151. rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \
  152. evrlwi w0,w0,1; /* W = W rotl 1 */ \
  153. add e,e,rT2; /* 1: E = E + A' */ \
  154. evaddw rT0,w0,rK; /* WK = W + K */ \
  155. rotrwi b,b,2; /* 1: B = B rotl 30 */ \
  156. LOAD_K##k##1 \
  157. evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \
  158. add e,e,rT0; /* 1: E = E + WK */ \
  159. xor rT2,a,b; /* 2: F' = B xor C */ \
  160. add d,d,rT1; /* 2: E = E + WK */ \
  161. xor rT2,rT2,c; /* 2: F = F' xor D */ \
  162. rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
  163. add d,d,rT2; /* 2: E = E + F */ \
  164. rotrwi a,a,2; /* 2: B = B rotl 30 */ \
  165. add d,d,rT0 /* 2: E = E + A' */
  166. #define R_40_59(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
  167. and rT2,b,c; /* 1: F' = B and C */ \
  168. evmergelohi rT0,w7,w6; /* W[-3] */ \
  169. or rT1,b,c; /* 1: F" = B or C */ \
  170. evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \
  171. and rT1,d,rT1; /* 1: F" = F" and D */ \
  172. evxor w0,w0,w4; /* W = W xor W[-8] */ \
  173. or rT2,rT2,rT1; /* 1: F = F' or F" */ \
  174. evxor w0,w0,w1; /* W = W xor W[-14] */ \
  175. add e,e,rT2; /* 1: E = E + F */ \
  176. evrlwi w0,w0,1; /* W = W rotl 1 */ \
  177. rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \
  178. evaddw rT0,w0,rK; /* WK = W + K */ \
  179. add e,e,rT2; /* 1: E = E + A' */ \
  180. LOAD_K##k##1 \
  181. evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \
  182. rotrwi b,b,2; /* 1: B = B rotl 30 */ \
  183. add e,e,rT0; /* 1: E = E + WK */ \
  184. and rT2,a,b; /* 2: F' = B and C */ \
  185. or rT0,a,b; /* 2: F" = B or C */ \
  186. add d,d,rT1; /* 2: E = E + WK */ \
  187. and rT0,c,rT0; /* 2: F" = F" and D */ \
  188. rotrwi a,a,2; /* 2: B = B rotl 30 */ \
  189. or rT2,rT2,rT0; /* 2: F = F' or F" */ \
  190. rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
  191. add d,d,rT2; /* 2: E = E + F */ \
  192. add d,d,rT0 /* 2: E = E + A' */
  193. #define R_60_79(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
  194. R_20_39(a, b, c, d, e, w0, w1, w4, w6, w7, k)
  195. _GLOBAL(ppc_spe_sha1_transform)
  196. INITIALIZE
  197. lwz rH0,0(rHP)
  198. lwz rH1,4(rHP)
  199. mtctr r5
  200. lwz rH2,8(rHP)
  201. lis rKP,PPC_SPE_SHA1_K@h
  202. lwz rH3,12(rHP)
  203. ori rKP,rKP,PPC_SPE_SHA1_K@l
  204. lwz rH4,16(rHP)
  205. ppc_spe_sha1_main:
  206. R_00_15(rH0, rH1, rH2, rH3, rH4, rW1, rW0, 1, 0)
  207. R_00_15(rH3, rH4, rH0, rH1, rH2, rW2, rW1, 0, 8)
  208. R_00_15(rH1, rH2, rH3, rH4, rH0, rW3, rW2, 0, 16)
  209. R_00_15(rH4, rH0, rH1, rH2, rH3, rW4, rW3, 0, 24)
  210. R_00_15(rH2, rH3, rH4, rH0, rH1, rW5, rW4, 0, 32)
  211. R_00_15(rH0, rH1, rH2, rH3, rH4, rW6, rW5, 0, 40)
  212. R_00_15(rH3, rH4, rH0, rH1, rH2, rT3, rW6, 0, 48)
  213. R_00_15(rH1, rH2, rH3, rH4, rH0, rT3, rW7, 0, 56)
  214. R_16_19(rH4, rH0, rH1, rH2, rH3, rW0, rW1, rW4, rW6, rW7, 0)
  215. R_16_19(rH2, rH3, rH4, rH0, rH1, rW1, rW2, rW5, rW7, rW0, 2)
  216. R_20_39(rH0, rH1, rH2, rH3, rH4, rW2, rW3, rW6, rW0, rW1, 0)
  217. R_20_39(rH3, rH4, rH0, rH1, rH2, rW3, rW4, rW7, rW1, rW2, 0)
  218. R_20_39(rH1, rH2, rH3, rH4, rH0, rW4, rW5, rW0, rW2, rW3, 0)
  219. R_20_39(rH4, rH0, rH1, rH2, rH3, rW5, rW6, rW1, rW3, rW4, 0)
  220. R_20_39(rH2, rH3, rH4, rH0, rH1, rW6, rW7, rW2, rW4, rW5, 0)
  221. R_20_39(rH0, rH1, rH2, rH3, rH4, rW7, rW0, rW3, rW5, rW6, 0)
  222. R_20_39(rH3, rH4, rH0, rH1, rH2, rW0, rW1, rW4, rW6, rW7, 0)
  223. R_20_39(rH1, rH2, rH3, rH4, rH0, rW1, rW2, rW5, rW7, rW0, 0)
  224. R_20_39(rH4, rH0, rH1, rH2, rH3, rW2, rW3, rW6, rW0, rW1, 0)
  225. R_20_39(rH2, rH3, rH4, rH0, rH1, rW3, rW4, rW7, rW1, rW2, 3)
  226. R_40_59(rH0, rH1, rH2, rH3, rH4, rW4, rW5, rW0, rW2, rW3, 0)
  227. R_40_59(rH3, rH4, rH0, rH1, rH2, rW5, rW6, rW1, rW3, rW4, 0)
  228. R_40_59(rH1, rH2, rH3, rH4, rH0, rW6, rW7, rW2, rW4, rW5, 0)
  229. R_40_59(rH4, rH0, rH1, rH2, rH3, rW7, rW0, rW3, rW5, rW6, 0)
  230. R_40_59(rH2, rH3, rH4, rH0, rH1, rW0, rW1, rW4, rW6, rW7, 0)
  231. R_40_59(rH0, rH1, rH2, rH3, rH4, rW1, rW2, rW5, rW7, rW0, 0)
  232. R_40_59(rH3, rH4, rH0, rH1, rH2, rW2, rW3, rW6, rW0, rW1, 0)
  233. R_40_59(rH1, rH2, rH3, rH4, rH0, rW3, rW4, rW7, rW1, rW2, 0)
  234. R_40_59(rH4, rH0, rH1, rH2, rH3, rW4, rW5, rW0, rW2, rW3, 0)
  235. R_40_59(rH2, rH3, rH4, rH0, rH1, rW5, rW6, rW1, rW3, rW4, 4)
  236. R_60_79(rH0, rH1, rH2, rH3, rH4, rW6, rW7, rW2, rW4, rW5, 0)
  237. R_60_79(rH3, rH4, rH0, rH1, rH2, rW7, rW0, rW3, rW5, rW6, 0)
  238. R_60_79(rH1, rH2, rH3, rH4, rH0, rW0, rW1, rW4, rW6, rW7, 0)
  239. R_60_79(rH4, rH0, rH1, rH2, rH3, rW1, rW2, rW5, rW7, rW0, 0)
  240. R_60_79(rH2, rH3, rH4, rH0, rH1, rW2, rW3, rW6, rW0, rW1, 0)
  241. R_60_79(rH0, rH1, rH2, rH3, rH4, rW3, rW4, rW7, rW1, rW2, 0)
  242. R_60_79(rH3, rH4, rH0, rH1, rH2, rW4, rW5, rW0, rW2, rW3, 0)
  243. lwz rT3,0(rHP)
  244. R_60_79(rH1, rH2, rH3, rH4, rH0, rW5, rW6, rW1, rW3, rW4, 0)
  245. lwz rW1,4(rHP)
  246. R_60_79(rH4, rH0, rH1, rH2, rH3, rW6, rW7, rW2, rW4, rW5, 0)
  247. lwz rW2,8(rHP)
  248. R_60_79(rH2, rH3, rH4, rH0, rH1, rW7, rW0, rW3, rW5, rW6, 0)
  249. lwz rW3,12(rHP)
  250. NEXT_BLOCK
  251. lwz rW4,16(rHP)
  252. add rH0,rH0,rT3
  253. stw rH0,0(rHP)
  254. add rH1,rH1,rW1
  255. stw rH1,4(rHP)
  256. add rH2,rH2,rW2
  257. stw rH2,8(rHP)
  258. add rH3,rH3,rW3
  259. stw rH3,12(rHP)
  260. add rH4,rH4,rW4
  261. stw rH4,16(rHP)
  262. bdnz ppc_spe_sha1_main
  263. FINALIZE
  264. blr
  265. .data
  266. .align 4
  267. PPC_SPE_SHA1_K:
  268. .long 0x5A827999,0x6ED9EBA1,0x8F1BBCDC,0xCA62C1D6