/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Move a buffer from src to dest (alignment handled by the hardware).
 * If dest is below src, or the buffers do not overlap, branch to memcpy;
 * otherwise copy in reverse order.
 *
 * Parameters:
 *      x0 - dest
 *      x1 - src
 *      x2 - n
 * Returns:
 *      x0 - dest
 */
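
/*
 * For reference, the overall shape of the routine in C (a hedged sketch
 * only, not the kernel's C implementation; __memcpy is the forward copy
 * routine this file branches to):
 *
 *      void *memmove(void *dest, const void *src, size_t n)
 *      {
 *              char *d = dest;
 *              const char *s = src;
 *
 *              if (d < s || d >= s + n)        // no overlap: forward copy
 *                      return __memcpy(dest, src, n);
 *              while (n--)                     // overlap: copy backwards
 *                      d[n] = s[n];
 *              return dest;
 *      }
 */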
dstin   .req    x0
src     .req    x1
count   .req    x2
tmp1    .req    x3
tmp1w   .req    w3
tmp2    .req    x4
tmp2w   .req    w4
tmp3    .req    x5
tmp3w   .req    w5
dst     .req    x6
A_l     .req    x7
A_h     .req    x8
B_l     .req    x9
B_h     .req    x10
C_l     .req    x11
C_h     .req    x12
D_l     .req    x13
D_h     .req    x14
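
/*
 * memmove is declared weak so that an instrumented implementation (for
 * example, KASAN's C version) can take over the exported symbol, while
 * __memmove remains available as the uninstrumented entry point.
 */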
        .weak memmove
ENTRY(__memmove)
ENTRY(memmove)
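        /* Prefetch one cache line ahead of src with a streaming (use-once) hint. */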
        prfm    pldl1strm, [src, #L1_CACHE_BYTES]
        cmp     dstin, src
        b.lo    __memcpy
        add     tmp1, src, count
        cmp     dstin, tmp1
        b.hs    __memcpy                /* No overlap. */
        add     dst, dstin, count
        add     src, src, count
        cmp     count, #16
        b.lo    .Ltail15                /* Probably unaligned accesses. */
        ands    tmp2, src, #15          /* Bytes to reach alignment. */
        b.eq    .LSrcAligned
        sub     count, count, tmp2
        /*
         * Copy tmp2 bytes first, so that src becomes 16-byte aligned.
         * The cost of these extra instructions is acceptable, and it
         * means the accesses that follow work on aligned addresses.
         */
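        /*
         * Roughly, in C (a sketch only; src and dst point just past the
         * bytes still to be copied and are pre-decremented, and the
         * unaligned-capable loads are shown as plain casts):
         *
         *      if (tmp2 & 1) { src -= 1; dst -= 1; *(u8  *)dst = *(u8  *)src; }
         *      if (tmp2 & 2) { src -= 2; dst -= 2; *(u16 *)dst = *(u16 *)src; }
         *      if (tmp2 & 4) { src -= 4; dst -= 4; *(u32 *)dst = *(u32 *)src; }
         *      if (tmp2 & 8) { src -= 8; dst -= 8; *(u64 *)dst = *(u64 *)src; }
         */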
        tbz     tmp2, #0, 1f
        ldrb    tmp1w, [src, #-1]!
        strb    tmp1w, [dst, #-1]!
1:
        tbz     tmp2, #1, 2f
        ldrh    tmp1w, [src, #-2]!
        strh    tmp1w, [dst, #-2]!
2:
        tbz     tmp2, #2, 3f
        ldr     tmp1w, [src, #-4]!
        str     tmp1w, [dst, #-4]!
3:
        tbz     tmp2, #3, .LSrcAligned
        ldr     tmp1, [src, #-8]!
        str     tmp1, [dst, #-8]!
.LSrcAligned:
        cmp     count, #64
        b.ge    .Lcpy_over64
        /*
         * Deal with small copies quickly by dropping straight into the
         * exit block.
         */
.Ltail63:
        /*
         * Copy up to 48 bytes of data.  At this point we only need the
         * bottom 6 bits of count to be accurate.
         */
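        /*
         * In outline (copy16 below is just shorthand for one ldp/stp
         * pair): fall through 48 -> 32 -> 16 bytes depending on bits
         * 5:4 of count:
         *
         *      for (left = count & 0x30; left; left -= 16) {
         *              src -= 16; dst -= 16;
         *              copy16(dst, src);
         *      }
         */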
        ands    tmp1, count, #0x30
        b.eq    .Ltail15
        cmp     tmp1w, #0x20
        b.eq    1f
        b.lt    2f
        ldp     A_l, A_h, [src, #-16]!
        stp     A_l, A_h, [dst, #-16]!
1:
        ldp     A_l, A_h, [src, #-16]!
        stp     A_l, A_h, [dst, #-16]!
2:
        ldp     A_l, A_h, [src, #-16]!
        stp     A_l, A_h, [dst, #-16]!
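        /*
         * Copy the remaining 0-15 bytes: 8, then 4, then 2, then 1 byte,
         * driven by the low four bits of count.
         */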
.Ltail15:
        tbz     count, #3, 1f
        ldr     tmp1, [src, #-8]!
        str     tmp1, [dst, #-8]!
1:
        tbz     count, #2, 2f
        ldr     tmp1w, [src, #-4]!
        str     tmp1w, [dst, #-4]!
2:
        tbz     count, #1, 3f
        ldrh    tmp1w, [src, #-2]!
        strh    tmp1w, [dst, #-2]!
3:
        tbz     count, #0, .Lexitfunc
        ldrb    tmp1w, [src, #-1]
        strb    tmp1w, [dst, #-1]
.Lexitfunc:
        ret
.Lcpy_over64:
        subs    count, count, #128
        b.ge    .Lcpy_body_large
        /*
         * Less than 128 bytes to copy, so handle 64 bytes here and then jump
         * to the tail.
         */
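        /*
         * In outline (copy64 is shorthand for the four ldp/stp pairs
         * that follow):
         *
         *      src -= 64; dst -= 64;
         *      copy64(dst, src);
         *      if (count & 0x3f)
         *              goto Ltail63;
         *      return dest;
         */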
        ldp     A_l, A_h, [src, #-16]
        stp     A_l, A_h, [dst, #-16]
        ldp     B_l, B_h, [src, #-32]
        ldp     C_l, C_h, [src, #-48]
        stp     B_l, B_h, [dst, #-32]
        stp     C_l, C_h, [dst, #-48]
        ldp     D_l, D_h, [src, #-64]!
        stp     D_l, D_h, [dst, #-64]!
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
        /*
         * Critical loop.  Start at a new cache line boundary.  Assuming
         * 64 bytes per line this ensures the entire loop is in one line.
         */
        .p2align        L1_CACHE_SHIFT
.Lcpy_body_large:
        /* Pre-load 64 bytes of data. */
        ldp     A_l, A_h, [src, #-16]
        ldp     B_l, B_h, [src, #-32]
        ldp     C_l, C_h, [src, #-48]
        ldp     D_l, D_h, [src, #-64]!
1:
        /*
         * Interleave the load of the next 64-byte block with the store of
         * the 64 bytes loaded on the previous iteration.
         */
        stp     A_l, A_h, [dst, #-16]
        ldp     A_l, A_h, [src, #-16]
        stp     B_l, B_h, [dst, #-32]
        ldp     B_l, B_h, [src, #-32]
        stp     C_l, C_h, [dst, #-48]
        ldp     C_l, C_h, [src, #-48]
        stp     D_l, D_h, [dst, #-64]!
        ldp     D_l, D_h, [src, #-64]!
        prfm    pldl1strm, [src, #(4*L1_CACHE_BYTES)]
        subs    count, count, #64
        b.ge    1b
        stp     A_l, A_h, [dst, #-16]
        stp     B_l, B_h, [dst, #-32]
        stp     C_l, C_h, [dst, #-48]
        stp     D_l, D_h, [dst, #-64]!
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
ENDPIPROC(memmove)
ENDPROC(__memmove)