/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"
.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
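
/*
 * These offsets assume the layout of the kernel's generic CAST6
 * context (struct cast6_ctx): 12 rounds x 4 32-bit masking keys (Km)
 * first, then 12 rounds x 4 8-bit rotation keys (Kr), which puts kr
 * at byte offset 12*4*4 = 192.
 */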
/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4
/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %r15

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d
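
/*
 * lookup_32bit: scalar s-box phase for one 32-bit word held in the low
 * half of a general-purpose register.  Bytes are extracted two at a
 * time through the bh/bl subregisters and index the shared
 * cast_s1..cast_s4 tables; op1/op2/op3 (some order of xor/sub/add)
 * fold the four table values into dst.  interleave_op is either dummy
 * or shr_next, which exposes the next 16 bits when a second word is
 * queued in the same register.
 */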
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;
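
/*
 * The CAST6 round function is split in two.  F_head does the vector
 * half: mix the broadcast masking key into the data with op0 and
 * rotate each 32-bit word left by the current rotation key (vpslld by
 * RKRF plus vpsrld by RKRR = 32 - RKRF), then spill the four words of
 * x into two 64-bit GPRs.  F_tail runs four scalar lookup_32bit passes
 * over those GPRs and packs the s-box results back into an xmm
 * register.
 */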
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,	x;  \
	vpslld	RKRF,	x,	RTMP; \
	vpsrld	RKRR,	x,	x;  \
	vpor	RTMP,	x,	x;  \
	\
	vmovq		x,	gi1; \
	vpextrq $1,	x,	gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32,	RFS2;  \
	orq		RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32,	RFS1;  \
	orq		RFS1, RFS3; \
	\
	vmovq		RFS2, x;    \
	vpinsrq $1,	RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor	a1, RX,   a1; \
	vpxor	a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
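
/*
 * F1_2/F2_2/F3_2 are the CAST-256 round functions f1/f2/f3 of RFC
 * 2612, applied to two xmm registers (all eight blocks) at a time:
 * f1 mixes the key with add and combines the s-boxes with
 * xor/sub/add, f2 with xor and sub/add/xor, f3 with sub and
 * add/xor/sub.
 */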
#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);
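
/*
 * get_round_keys: broadcast the nn-th 32-bit masking key into RKM and
 * split the next rotation key byte out of RKR: RKRF keeps its low five
 * bits (.Lfirst_mask), RKRR is 32 - RKRF (from .L32_mask), and RKR is
 * shifted down a byte so the following round sees the next key.
 */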
#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;       \
	vpand		R1ST,               RKR, RKRF; \
	vpsubq		RKRF,               R32, RKRR; \
	vpsrldq $1,	RKR,                RKR;
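
/*
 * Q is one forward quad-round on all eight blocks: C ^= f1(D),
 * B ^= f2(C), A ^= f3(B), D ^= f1(A), each step using its own Km/Kr
 * pair.  QBAR performs the same four steps in reverse order, which
 * makes it the inverse of Q under the same round keys.
 */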
#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);
#define shuffle(mask) \
	vpshufb		mask,	RKR, RKR;
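
/*
 * preload_rkr: pull in the 16 rotation keys consumed by the next four
 * quad-rounds.  The xor with .L16_mask adds 16 (mod 32) to every key
 * (x ^ 16 == (x + 16) mod 32 for five-bit values), and do_mask is
 * either dummy or shuffle, reordering the key bytes so get_round_keys
 * pops them in Q or QBAR order.
 */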
#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,		RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),		RKR, RKR; \
	do_mask(mask);
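
/*
 * transpose_4x4: 4x4 transpose of 32-bit words via unpack, converting
 * four registers that each hold one whole block into four registers
 * that each hold the same word (A, B, C or D) of all four blocks.
 */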
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;
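
/*
 * inpack_blocks/outunpack_blocks convert between memory order and the
 * working layout: rmask (.Lbswap_mask) byte-swaps each 32-bit word
 * into the big-endian convention CAST6 is specified in, and the
 * transpose switches between block-per-register and word-per-register
 * form.
 */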
#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	vpshufb rmask,	x2,	x2; \
	vpshufb rmask,	x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,	x0, x0; \
	vpshufb rmask,	x1, x1; \
	vpshufb rmask,	x2, x2; \
	vpshufb rmask,	x3, x3;
.section	.rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
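
/*
 * Byte-shuffle masks for preload_rkr: each group of four bytes is the
 * rotation-key set of one quad-round.  In-order groups feed Q,
 * byte-reversed groups feed QBAR, and the _dec masks also walk the
 * quad-rounds backwards.
 */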
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16

.section	.rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0

.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0
.text

.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */
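	/* CAST-256 is 6 forward quad-rounds followed by 6 reverse
	 * quad-rounds; here each runs on eight blocks in parallel. */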
	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)
.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */
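	/* Decryption replays the 12 quad-rounds backwards, with Q and
	 * QBAR exchanging roles since each inverts the other under the
	 * same round keys. */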
	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)
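
/*
 * The load_8way/store_8way, store_cbc_8way, load_ctr_8way/
 * store_ctr_8way and load_xts_8way/store_xts_8way macros used below
 * are provided by the included glue_helper-asm-avx.S and move the
 * eight blocks between memory and RA1..RD2.
 */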
ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_ecb_enc_8way)
ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_ecb_dec_8way)
ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	popq %r12;
	FRAME_END
	ret;
ENDPROC(cast6_cbc_dec_8way)
ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
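	/* load_ctr_8way derives eight consecutive counter blocks from
	 * the 128-bit IV, with .Lbswap128_mask handling the conversion
	 * between counter arithmetic and cipher byte order. */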
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	popq %r12;
	FRAME_END
	ret;
ENDPROC(cast6_ctr_8way)
ENTRY(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
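	/* load_xts_8way steps the tweak through successive
	 * multiplications by α in GF(2¹²⁸); the 0x87 byte in
	 * .Lxts_gf128mul_and_shl1_mask is the reduction constant of
	 * the field polynomial. */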
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_xts_enc_8way)
ENTRY(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	ret;
ENDPROC(cast6_xts_dec_8way)