sha256_mb_mgr_submit_avx2.S

/*
 * Buffer submit code for multi buffer SHA256 algorithm
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/linkage.h>
#include <asm/frame.h>
#include "sha256_mb_mgr_datastruct.S"

.extern sha256_x8_avx2

# LINUX register definitions
arg1		= %rdi
arg2		= %rsi
size_offset	= %rcx
tmp2		= %rcx
extra_blocks	= %rdx

# Common definitions
#define state	arg1
#define job	%rsi
#define len2	arg2
#define p2	arg2

# idx must be a register not clobbered by sha256_x8_avx2
idx		= %r8
DWORD_idx	= %r8d
last_len	= %r8

p		= %r11
start_offset	= %r11

unused_lanes	= %rbx
BYTE_unused_lanes = %bl

job_rax		= %rax
len		= %rax
DWORD_len	= %eax

lane		= %r12
tmp3		= %r12

tmp		= %r9
DWORD_tmp	= %r9d

lane_data	= %r10

# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
# arg 1 : rdi : state
# arg 2 : rsi : job
ENTRY(sha256_mb_mgr_submit_avx2)
	FRAME_BEGIN
	push	%rbx
	push	%r12

	mov	_unused_lanes(state), unused_lanes
	mov	unused_lanes, lane
	and	$0xF, lane
	shr	$4, unused_lanes
	imul	$_LANE_DATA_size, lane, lane_data
	movl	$STS_BEING_PROCESSED, _status(job)
	lea	_ldata(state, lane_data), lane_data
	mov	unused_lanes, _unused_lanes(state)
	movl	_len(job), DWORD_len

	mov	job, _job_in_lane(lane_data)
	shl	$4, len
	or	lane, len

	movl	DWORD_len, _lens(state , lane, 4)
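
	# len now holds (job length << 4) | lane, so the min-length scan in
	# start_loop can recover both the shortest job and its lane index
	# from a single packed dword.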

	# Load digest words from result_digest
	vmovdqu	_result_digest(job), %xmm0
	vmovdqu	_result_digest+1*16(job), %xmm1
	vmovd	%xmm0, _args_digest(state, lane, 4)
	vpextrd	$1, %xmm0, _args_digest+1*32(state , lane, 4)
	vpextrd	$2, %xmm0, _args_digest+2*32(state , lane, 4)
	vpextrd	$3, %xmm0, _args_digest+3*32(state , lane, 4)
	vmovd	%xmm1, _args_digest+4*32(state , lane, 4)
	vpextrd	$1, %xmm1, _args_digest+5*32(state , lane, 4)
	vpextrd	$2, %xmm1, _args_digest+6*32(state , lane, 4)
	vpextrd	$3, %xmm1, _args_digest+7*32(state , lane, 4)
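	# Note: _args_digest is laid out transposed; digest word i of all
	# eight lanes shares one 32-byte row (hence the +i*32 offsets), so
	# sha256_x8_avx2 can pick up each state word for every lane together.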

	mov	_buffer(job), p
	mov	p, _args_data_ptr(state, lane, 8)

	cmp	$0xF, unused_lanes
	jne	return_null
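	# Only the 0xF terminator is left in unused_lanes once all eight
	# lanes are occupied; until then the job merely stays queued in its
	# lane and NULL is returned to the caller.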

start_loop:
	# Find min length
	vmovdqa	_lens(state), %xmm0
	vmovdqa	_lens+1*16(state), %xmm1

	vpminud	%xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has {x,x,E,F}
	vpalignr $4, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,x,E}
	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has min val in low dword
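
	# Each lens entry is (length << 4) | lane, so the packed minimum in
	# xmm2 carries both pieces: the low nibble is the lane index and the
	# upper bits are the shortest remaining length.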
	vmovd	%xmm2, DWORD_idx
	mov	idx, len2
	and	$0xF, idx
	shr	$4, len2
	jz	len_is_0

	vpand	clear_low_nibble(%rip), %xmm2, %xmm2
	vpshufd	$0, %xmm2, %xmm2

	vpsubd	%xmm2, %xmm0, %xmm0
	vpsubd	%xmm2, %xmm1, %xmm1

	vmovdqa	%xmm0, _lens + 0*16(state)
	vmovdqa	%xmm1, _lens + 1*16(state)
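
	# The broadcast minimum is subtracted from every lane's length so
	# lens[] stays consistent after the hash rounds below advance all
	# eight lanes by the same amount (len2).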
  139. # "state" and "args" are the same address, arg1
  140. # len is arg2
  141. call sha256_x8_avx2
  142. # state and idx are intact
  143. len_is_0:
  144. # process completed job "idx"
  145. imul $_LANE_DATA_size, idx, lane_data
  146. lea _ldata(state, lane_data), lane_data
  147. mov _job_in_lane(lane_data), job_rax
  148. mov _unused_lanes(state), unused_lanes
  149. movq $0, _job_in_lane(lane_data)
  150. movl $STS_COMPLETED, _status(job_rax)
  151. shl $4, unused_lanes
  152. or idx, unused_lanes
  153. mov unused_lanes, _unused_lanes(state)
  154. movl $0xFFFFFFFF, _lens(state,idx,4)
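	# Parking the freed lane at the maximum length keeps it from ever
	# winning the min-length search until a new job is submitted to it.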

	vmovd	_args_digest(state, idx, 4), %xmm0
	vpinsrd	$1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
	vpinsrd	$2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
	vpinsrd	$3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
	vmovd	_args_digest+4*32(state, idx, 4), %xmm1
	vpinsrd	$1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1
	vpinsrd	$2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1
	vpinsrd	$3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1

	vmovdqu	%xmm0, _result_digest(job_rax)
	vmovdqu	%xmm1, _result_digest+1*16(job_rax)

return:
	pop	%r12
	pop	%rbx
	FRAME_END
	ret

return_null:
	xor	job_rax, job_rax
	jmp	return
ENDPROC(sha256_mb_mgr_submit_avx2)

.section	.rodata.cst16.clear_low_nibble, "aM", @progbits, 16
.align 16
clear_low_nibble:
	.octa	0x000000000000000000000000FFFFFFF0