/*
 * Twofish Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "twofish-avx-x86_64-asm_64.S"
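
/*
 * Constants: the byte-swap mask converts the 128-bit CTR counter between
 * big-endian (wire) order and register order, and the gf128mul constant
 * implements multiplication by x in GF(2^128) for the XTS tweak (a left
 * shift by one bit, reducing with the polynomial 0x87 on carry-out).
 */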
.data

.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0

.text

/* structure of crypto context */
#define s0	0
#define s1	1024
#define s2	2048
#define s3	3072
#define w	4096
#define	k	4128
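/*
 * These offsets mirror struct twofish_ctx from include/crypto/twofish.h:
 * u32 s[4][256] (four key-dependent 1 KB S-box tables), followed by the
 * eight whitening words w[8] at 4096 and the round subkeys k[32] at 4128.
 */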

/**********************************************************************
  8-way AVX twofish
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX0 %xmm8
#define RY0 %xmm9

#define RX1 %xmm10
#define RY1 %xmm11

#define RK1 %xmm12
#define RK2 %xmm13

#define RT %xmm14
#define RR %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RGS1  %r8
#define RGS1d %r8d
#define RGS2  %r9
#define RGS2d %r9d
#define RGS3  %r10
#define RGS3d %r10d
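
/*
 * The RID, RGI and RGS general-purpose registers are scratch space for the
 * scalar S-box lookups; the RGI registers are chosen for their addressable
 * low/high byte halves (bl/bh and friends).  Note that RID1 (%rbp) and
 * RGI4 (%rbx) live in callee-saved registers under the SysV ABI, which is
 * why __twofish_enc_blk8 and __twofish_dec_blk8 push/pop them around the
 * rounds.
 */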

#define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \
	movzbl src ## bl, RID1d; \
	movzbl src ## bh, RID2d; \
	shrq $16, src; \
	movl t0(CTX, RID1, 4), dst ## d; \
	movl t1(CTX, RID2, 4), RID2d; \
	movzbl src ## bl, RID1d; \
	xorl RID2d, dst ## d; \
	movzbl src ## bh, RID2d; \
	interleave_op(il_reg); \
	xorl t2(CTX, RID1, 4), dst ## d; \
	xorl t3(CTX, RID2, 4), dst ## d;
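
/*
 * lookup_32bit() computes one 32-bit output of the key-dependent g()
 * function from the low 32 bits of src; roughly, in C:
 *
 *	dst = t0[src & 0xff] ^ t1[(src >> 8) & 0xff]
 *	    ^ t2[(src >> 16) & 0xff] ^ t3[(src >> 24) & 0xff];
 *
 * interleave_op(il_reg) lets the caller schedule one extra instruction
 * (e.g. the shift exposing the next 32-bit word) into the load latency.
 */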

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;

#define G(gi1, gi2, x, t0, t1, t2, t3) \
	lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1); \
	lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2); \
	\
	lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none); \
	shlq $32, RGS2; \
	orq RGS1, RGS2; \
	lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none); \
	shlq $32, RGS1; \
	orq RGS1, RGS3;
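
/*
 * G() runs g() over four 32-bit words: gi1 and gi2 each carry two words
 * (one xmm half extracted with vmovq/vpextrq).  The two results for each
 * input register are packed into RGS2 (for gi1) and RGS3 (for gi2),
 * ready to be reinserted into an xmm register with vmovq + vpinsrq.
 */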

#define round_head_2(a, b, x1, y1, x2, y2) \
	vmovq b ## 1, RGI3; \
	vpextrq $1, b ## 1, RGI4; \
	\
	G(RGI1, RGI2, x1, s0, s1, s2, s3); \
	vmovq a ## 2, RGI1; \
	vpextrq $1, a ## 2, RGI2; \
	vmovq RGS2, x1; \
	vpinsrq $1, RGS3, x1, x1; \
	\
	G(RGI3, RGI4, y1, s1, s2, s3, s0); \
	vmovq b ## 2, RGI3; \
	vpextrq $1, b ## 2, RGI4; \
	vmovq RGS2, y1; \
	vpinsrq $1, RGS3, y1, y1; \
	\
	G(RGI1, RGI2, x2, s0, s1, s2, s3); \
	vmovq RGS2, x2; \
	vpinsrq $1, RGS3, x2, x2; \
	\
	G(RGI3, RGI4, y2, s1, s2, s3, s0); \
	vmovq RGS2, y2; \
	vpinsrq $1, RGS3, y2, y2;
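
/*
 * round_head_2() computes x = g(a) and y = g(b <<< 8) for both block
 * groups.  The rotated table order (s1, s2, s3, s0) on the y lookups is
 * equivalent to rotating b left by 8 before a normal g() lookup, so the
 * data rotate is avoided entirely.  Extraction of the next inputs is
 * interleaved with the pending table loads to hide their latency.
 */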

#define encround_tail(a, b, c, d, x, y, prerotate) \
	vpaddd x, y, x; \
	vpaddd x, RK1, RT;\
	prerotate(b); \
	vpxor RT, c, c; \
	vpaddd y, x, y; \
	vpaddd y, RK2, y; \
	vpsrld $1, c, RT; \
	vpslld $(32 - 1), c, c; \
	vpor c, RT, c; \
	vpxor d, y, d;

#define decround_tail(a, b, c, d, x, y, prerotate) \
	vpaddd x, y, x; \
	vpaddd x, RK1, RT;\
	prerotate(a); \
	vpxor RT, c, c; \
	vpaddd y, x, y; \
	vpaddd y, RK2, y; \
	vpxor d, y, d; \
	vpsrld $1, d, y; \
	vpslld $(32 - 1), d, d; \
	vpor d, y, d;
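
/*
 * encround_tail()/decround_tail() finish a round from the two g() results:
 * the pseudo-Hadamard transform (x += y; y += x), addition of the two
 * round subkeys, and the Feistel mix into c and d with the 1-bit rotates
 * of the Twofish round function.  The left rotate that the next round
 * needs on its d input is hoisted into this round via prerotate() (and
 * done by rotate_1l() on the D halves before the first round) to shorten
 * the dependency chain.
 */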

#define rotate_1l(x) \
	vpslld $1, x, RR; \
	vpsrld $(32 - 1), x, x; \
	vpor x, RR, x;

#define preload_rgi(c) \
	vmovq c, RGI1; \
	vpextrq $1, c, RGI2;

#define encrypt_round(n, a, b, c, d, preload, prerotate) \
	vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
	vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
	round_head_2(a, b, RX0, RY0, RX1, RY1); \
	encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
	preload(c ## 1); \
	encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define decrypt_round(n, a, b, c, d, preload, prerotate) \
	vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
	vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
	round_head_2(a, b, RX0, RY0, RX1, RY1); \
	decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
	preload(c ## 1); \
	decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define encrypt_cycle(n) \
	encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
	encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l);

#define encrypt_cycle_last(n) \
	encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
	encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy);

#define decrypt_cycle(n) \
	decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
	decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l);

#define decrypt_cycle_last(n) \
	decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
	decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy);
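
/*
 * A cycle is two Feistel rounds; the swap of the (A, B) and (C, D) halves
 * between rounds is done by swapping macro arguments rather than by moving
 * data.  The _last variants pass dummy for preload and prerotate in their
 * final round, since there is no following round to feed.
 */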

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x3; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1; \
	vpunpcklqdq x3, t2, x2; \
	vpunpckhqdq x3, t2, x3;
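
/*
 * transpose_4x4() is the usual 4x4 32-bit matrix transpose built from
 * dword/qword unpacks.  It converts four registers that each hold one
 * 128-bit block into sliced form, where register i holds word i of all
 * four blocks, so the rounds can process four blocks per xmm register.
 */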

#define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
	vpxor x0, wkey, x0; \
	vpxor x1, wkey, x1; \
	vpxor x2, wkey, x2; \
	vpxor x3, wkey, x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpxor x0, wkey, x0; \
	vpxor x1, wkey, x1; \
	vpxor x2, wkey, x2; \
	vpxor x3, wkey, x3;
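
/*
 * inpack_blocks() applies the input whitening (XOR with four words of w)
 * and transposes into sliced form; outunpack_blocks() transposes back and
 * applies the output whitening.  Callers pass w(CTX) or (w+4*4)(CTX) in
 * RK1 to select the whitening words for their direction.
 */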

.align 8
__twofish_enc_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
	 */

	vmovdqu w(CTX), RK1;

	pushq %rbp;
	pushq %rbx;
	pushq %rcx;

	inpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
	preload_rgi(RA1);
	rotate_1l(RD1);
	inpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
	rotate_1l(RD2);

	encrypt_cycle(0);
	encrypt_cycle(1);
	encrypt_cycle(2);
	encrypt_cycle(3);
	encrypt_cycle(4);
	encrypt_cycle(5);
	encrypt_cycle(6);
	encrypt_cycle_last(7);

	vmovdqu (w+4*4)(CTX), RK1;

	popq %rcx;
	popq %rbx;
	popq %rbp;

	outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
	outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

	ret;
ENDPROC(__twofish_enc_blk8)

.align 8
__twofish_dec_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	vmovdqu (w+4*4)(CTX), RK1;

	pushq %rbp;
	pushq %rbx;

	inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
	preload_rgi(RC1);
	rotate_1l(RA1);
	inpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
	rotate_1l(RA2);

	decrypt_cycle(7);
	decrypt_cycle(6);
	decrypt_cycle(5);
	decrypt_cycle(4);
	decrypt_cycle(3);
	decrypt_cycle(2);
	decrypt_cycle(1);
	decrypt_cycle_last(0);

	vmovdqu (w)(CTX), RK1;

	popq %rbx;
	popq %rbp;

	outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
	outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);

	ret;
ENDPROC(__twofish_dec_blk8)

ENTRY(twofish_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __twofish_enc_blk8;

	store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	FRAME_END
	ret;
ENDPROC(twofish_ecb_enc_8way)

ENTRY(twofish_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	call __twofish_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(twofish_ecb_dec_8way)
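
/*
 * CBC decryption parallelizes across all eight blocks: they are decrypted
 * in one pass, then store_cbc_8way() XORs each result with the preceding
 * ciphertext block still readable at src (preserved in %r12).  The IV XOR
 * for the first block is left to the C glue code.
 */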
ENTRY(twofish_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	call __twofish_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(twofish_cbc_dec_8way)
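
/*
 * CTR mode: load_ctr_8way() derives eight consecutive counter blocks from
 * the 128-bit big-endian counter at iv (byte-swapping via .Lbswap128_mask);
 * after encryption, store_ctr_8way() XORs the keystream with the source
 * data (src preserved in %r12 across the call).
 */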
ENTRY(twofish_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX0, RX1, RY0);

	call __twofish_enc_blk8;

	store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(twofish_ctr_8way)
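
/*
 * XTS: load_xts_8way() XORs each source block with successive tweaks
 * t, t·α, t·α², ..., stashing the tweaks in dst for the post-XOR, and
 * generates each next tweak via .Lxts_gf128mul_and_shl1_mask (shift left
 * by one, folding the carry back in through the 0x87 reduction).
 * store_xts_8way() then XORs the cipher output with the stashed tweaks.
 */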
ENTRY(twofish_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);

	call __twofish_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

	FRAME_END
	ret;
ENDPROC(twofish_xts_enc_8way)

ENTRY(twofish_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2,
		      RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);

	call __twofish_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(twofish_xts_dec_8way)