/* asm-poly1305-amd64.h - Poly1305 macros for AMD64 assembly
 *
 * Copyright (C) 2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef GCRY_ASM_POLY1305_AMD64_H
#define GCRY_ASM_POLY1305_AMD64_H

#include "asm-common-amd64.h"
/**********************************************************************
  poly1305 for stitched chacha20-poly1305 AMD64 implementations
 **********************************************************************/

/* Fixed register allocation for the scalar poly1305 part of the
 * stitched implementations.  The interleaved vector (chacha20) code
 * must leave these registers untouched between POLY1305_BLOCK_PART*
 * invocations.
 * NOTE(review): several of these (%rbx, %r12-%r15, %rsi) are SysV
 * callee-saved; the including function owns saving/restoring them -
 * confirm at each use site. */
#define POLY_RSTATE %r8         /* pointer to poly1305 state */
#define POLY_RSRC %r9           /* pointer to message input */

/* Accumulator h as limbs h0/h1 (64-bit) plus small top limb h2. */
#define POLY_R_H0 %rbx
#define POLY_R_H1 %rcx
#define POLY_R_H2 %r10
#define POLY_R_H2d %r10d        /* 32-bit view of h2; top limb stays small */

/* Key r: low limb r0 and precomputed r1*5/4 (see POLY1305_LOAD_STATE). */
#define POLY_R_R0 %r11
#define POLY_R_R1_MUL5 %r12

/* 128-bit scratch products x0 = lo, x1 = hi partials of h * r. */
#define POLY_R_X0_HI %r13
#define POLY_R_X0_LO %r14
#define POLY_R_X1_HI %r15
#define POLY_R_X1_LO %rsi

/* State memory layout relative to POLY_RSTATE: r at byte offset 16,
 * h directly after r.
 * NOTE(review): the leading 4*4 bytes presumably belong to another
 * portion of the combined state - confirm against the C state struct. */
#define POLY_S_R0 (4 * 4 + 0 * 8)(POLY_RSTATE)
#define POLY_S_R1 (4 * 4 + 1 * 8)(POLY_RSTATE)
#define POLY_S_H0 (4 * 4 + 2 * 8 + 0 * 8)(POLY_RSTATE)
#define POLY_S_H1 (4 * 4 + 2 * 8 + 1 * 8)(POLY_RSTATE)
#define POLY_S_H2d (4 * 4 + 2 * 8 + 2 * 8)(POLY_RSTATE)
/* Load accumulator h and key r from the state into registers, and
 * precompute POLY_R_R1_MUL5 = r1 + (r1 >> 2).  Assumes r1 is clamped
 * so its low two bits are zero (standard poly1305 key clamping -
 * TODO confirm the C key-setup guarantees this); then the value
 * equals (r1 >> 2) * 5, the factor used when folding high product
 * limbs modulo 2^130 - 5. */
#define POLY1305_LOAD_STATE() \
	movq POLY_S_H0, POLY_R_H0; \
	movq POLY_S_H1, POLY_R_H1; \
	movl POLY_S_H2d, POLY_R_H2d; \
	movq POLY_S_R0, POLY_R_R0; \
	movq POLY_S_R1, POLY_R_R1_MUL5; \
	shrq $2, POLY_R_R1_MUL5; \
	addq POLY_S_R1, POLY_R_R1_MUL5;
/* Write the accumulator h back to the state.  The key r is read-only
 * and is never stored back. */
#define POLY1305_STORE_STATE() \
	movq POLY_R_H0, POLY_S_H0; \
	movq POLY_R_H1, POLY_S_H1; \
	movl POLY_R_H2d, POLY_S_H2d;
/* a = h + m */
/* One poly1305 block, split into five parts so the vector code can be
 * interleaved between them.  PART1: absorb 16 message bytes at
 * POLY_RSRC + src_offset into h (the adcl $1 adds the implicit 2^128
 * padding bit), then start h * r with the h0*r1 partial product. */
#define POLY1305_BLOCK_PART1(src_offset) \
	addq ((src_offset) + 0 * 8)(POLY_RSRC), POLY_R_H0; \
	adcq ((src_offset) + 1 * 8)(POLY_RSRC), POLY_R_H1; \
	adcl $1, POLY_R_H2d; \
	\
	/* h = a * r (partial mod 2^130-5): */ \
	\
	/* h0 * r1 */ \
	movq POLY_R_H0, %rax; \
	mulq POLY_S_R1; \
	movq %rax, POLY_R_X1_LO; \
	movq %rdx, POLY_R_X1_HI;
/* PART2: low partial product x0 = h0 * r0 (clobbers %rax/%rdx). */
#define POLY1305_BLOCK_PART2() \
	\
	/* h0 * r0 */ \
	movq POLY_R_H0, %rax; \
	mulq POLY_R_R0; \
	movq %rax, POLY_R_X0_LO; \
	movq %rdx, POLY_R_X0_HI;
/* PART3: accumulate h1*r0 into x1, then compute h1 * (r1*5/4).
 * NOTE: the final mulq deliberately leaves its 128-bit result live in
 * %rdx:%rax - PART4 consumes it, so nothing interleaved between PART3
 * and PART4 may clobber %rax/%rdx. */
#define POLY1305_BLOCK_PART3() \
	\
	/* h1 * r0 */ \
	movq POLY_R_H1, %rax; \
	mulq POLY_R_R0; \
	addq %rax, POLY_R_X1_LO; \
	adcq %rdx, POLY_R_X1_HI; \
	\
	/* h1 * r1 mod 2^130-5 */ \
	movq POLY_R_R1_MUL5, %rax; \
	mulq POLY_R_H1;
/* PART4: fold the h1*r1*5/4 product (still in %rdx:%rax from PART3)
 * into x0, add the h2 cross terms (h2 is small, so plain imulq
 * suffices), and merge x1 into the new h1/h2 limbs. */
#define POLY1305_BLOCK_PART4() \
	movq POLY_R_H2, POLY_R_H1; \
	imulq POLY_R_R1_MUL5, POLY_R_H1; /* h2 * r1 mod 2^130-5 */ \
	addq %rax, POLY_R_X0_LO; \
	adcq %rdx, POLY_R_X0_HI; \
	imulq POLY_R_R0, POLY_R_H2; /* h2 * r0 */ \
	addq POLY_R_X1_LO, POLY_R_H1; \
	adcq POLY_R_X1_HI, POLY_R_H2;
/* PART5: partial reduction.  Bits of h at and above 2^130 are folded
 * back in multiplied by 5 (2^130 == 5 mod 2^130-5); only the low two
 * bits are kept in h2.  h is left partially reduced - the final full
 * reduction is deferred to the C finalization code. */
#define POLY1305_BLOCK_PART5() \
	\
	/* carry propagation */ \
	movq POLY_R_H2, POLY_R_H0; \
	andl $3, POLY_R_H2d; /* h2 &= 3: keep bits 128..129 */ \
	shrq $2, POLY_R_H0; /* overflow above 2^130 */ \
	leaq (POLY_R_H0, POLY_R_H0, 4), POLY_R_H0; /* * 5 */ \
	addq POLY_R_X0_LO, POLY_R_H0; \
	adcq POLY_R_X0_HI, POLY_R_H1; \
	adcl $0, POLY_R_H2d;
#ifdef TESTING_POLY1305_ASM
/* for testing only, mixed C/asm poly1305.c is marginally faster (~2%). */

/* unsigned int _gcry_poly1305_amd64_ssse3_blocks1(void *state,
 *                                                 const byte *src,
 *                                                 size_t nblks)
 * Scalar reference driver exercising the macros above: processes
 * nblks 16-byte blocks.  Returns 0 in %eax.
 * NOTE(review): the loop is do-while style (decrement before test),
 * so it assumes nblks >= 1 - confirm callers never pass 0. */
.align 8
.globl _gcry_poly1305_amd64_ssse3_blocks1
ELF(.type _gcry_poly1305_amd64_ssse3_blocks1,@function;)
_gcry_poly1305_amd64_ssse3_blocks1:
	/* input:
	 *	%rdi: poly1305-state
	 *	%rsi: src
	 *	%rdx: nblks
	 */
	pushq %rbp;
	movq %rsp, %rbp;

	/* Reserve frame and spill the callee-saved registers that the
	 * POLY_R_* allocation uses (%rbx, %r12-%r15). */
	subq $(10 * 8), %rsp;
	movq %rbx, (1 * 8)(%rsp);
	movq %r12, (2 * 8)(%rsp);
	movq %r13, (3 * 8)(%rsp);
	movq %r14, (4 * 8)(%rsp);
	movq %r15, (5 * 8)(%rsp);

	movq %rdx, (8 * 8)(%rsp); # NBLKS

	movq %rdi, POLY_RSTATE;
	movq %rsi, POLY_RSRC;

	POLY1305_LOAD_STATE();

.L_poly1:
	POLY1305_BLOCK_PART1(0 * 16);
	POLY1305_BLOCK_PART2();
	POLY1305_BLOCK_PART3();
	POLY1305_BLOCK_PART4();
	POLY1305_BLOCK_PART5();

	subq $1, (8 * 8)(%rsp); # NBLKS
	/* leaq advances src without touching flags, so jnz still tests
	 * the ZF produced by the subq above. */
	leaq (16)(POLY_RSRC), POLY_RSRC;
	jnz .L_poly1;

	POLY1305_STORE_STATE();

	/* Restore callee-saved registers and return 0. */
	movq (1 * 8)(%rsp), %rbx;
	movq (2 * 8)(%rsp), %r12;
	movq (3 * 8)(%rsp), %r13;
	movq (4 * 8)(%rsp), %r14;
	movq (5 * 8)(%rsp), %r15;

	xorl %eax, %eax;
	leave
	ret;
#endif
#endif /* GCRY_ASM_POLY1305_AMD64_H */