/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(__flush_icache_range)
	/* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

7:
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(__flush_icache_range)
ENDPROC(__flush_cache_user_range)
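
/*
 * Caller-side sketch (illustrative, not part of this file): in kernels of
 * this vintage the C declarations live in asm/cacheflush.h. A hypothetical
 * caller that writes instructions and then executes them would do roughly:
 *
 *	extern void __flush_icache_range(unsigned long start, unsigned long end);
 *
 *	memcpy(code_buf, insns, len);		// code_buf, insns, len: assumed
 *	__flush_icache_range((unsigned long)code_buf,
 *			     (unsigned long)code_buf + len);
 *
 * __flush_cache_user_range() is the variant for user addresses: as the code
 * above shows, it returns 0 in x0 on success or -EFAULT if a cache
 * maintenance fault was taken on a user mapping.
 */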

/*
 * invalidate_icache_range(start,end)
 *
 * Ensure that the I cache is invalidated within the specified region.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif
	uaccess_ttbr0_enable x2, x3, x4
	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(invalidate_icache_range)
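
/*
 * Usage sketch (illustrative): unlike __flush_icache_range(), this entry
 * point only invalidates the I-cache, so it suits a caller that has already
 * cleaned the D-cache for the region by other means:
 *
 *	extern int invalidate_icache_range(unsigned long start, unsigned long end);
 *
 *	if (invalidate_icache_range(start, end))
 *		return -EFAULT;		// faulted on a user mapping
 */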

/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned and invalidated to the PoC.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)
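
/*
 * For reference (a sketch of the shape, not the exact macro expansion):
 * ignoring the errata alternatives, dcache_by_line_op above unrolls to a
 * by-line loop roughly equivalent to:
 *
 *	dcache_line_size x2, x3		// x2 = line size in bytes
 *	add	x1, x0, x1		// x1 = end of region
 *	sub	x3, x2, #1
 *	bic	x0, x0, x3		// align base down to a line
 * 1:	dc	civac, x0		// clean & invalidate line to PoC
 *	add	x0, x0, x2
 *	cmp	x0, x1
 *	b.lo	1b
 *	dsb	sy			// complete within the given domain
 */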

/*
 * __clean_dcache_area_pou(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoU.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)
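
/*
 * Usage sketch (illustrative): a typical caller cleans to the PoU after
 * writing instructions, ahead of the I-cache invalidation that makes them
 * visible to the instruction side:
 *
 *	extern void __clean_dcache_area_pou(void *addr, size_t len);
 *
 *	__clean_dcache_area_pou(page_addr, PAGE_SIZE);	// page_addr: assumed
 *
 * As the alternative above shows, when the CPU advertises IDC
 * (ARM64_HAS_CACHE_IDC) no clean is needed for I/D coherence and only the
 * dsb ishst barrier is issued.
 */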

/*
 * __inval_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are invalidated. Any partial lines at the ends of the interval are
 * also cleaned to PoC to prevent data loss.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 * __dma_inv_area(start, size)
 * - start   - virtual start address of region
 * - size    - size in question
 */
__dma_inv_area:
	add	x1, x1, x0
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)
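
/*
 * Worked example (illustrative, assuming 64-byte cache lines): invalidating
 * [0x40010, 0x400b0) touches lines 0x40000, 0x40040 and 0x40080. The start
 * and end both fall mid-line, so lines 0x40000 and 0x40080 are cleaned &
 * invalidated (dc civac) to avoid losing neighbouring data that shares
 * those lines, while the fully covered line 0x40040 is simply invalidated
 * (dc ivac).
 */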

/*
 * __clean_dcache_area_poc(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoC.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 * __dma_clean_area(start, size)
 * - start   - virtual start address of region
 * - size    - size in question
 */
__dma_clean_area:
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)
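
/*
 * Usage sketch (illustrative): cleaning to the PoC writes dirty lines back
 * to memory without discarding them, e.g. before a non-coherent device
 * reads a buffer the CPU has just filled:
 *
 *	extern void __clean_dcache_area_poc(void *addr, size_t len);
 *
 *	__clean_dcache_area_poc(buf, size);	// buf, size: assumed
 *
 * __dma_clean_area is the same code, reached by the DMA entry points below.
 */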

/*
 * __clean_dcache_area_pop(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoP.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__clean_dcache_area_pop)
alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)
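
/*
 * Note: DC CVAP (clean to the Point of Persistence) is an ARMv8.2-DCPOP
 * instruction; on CPUs without it, the alternative above branches to
 * __clean_dcache_area_poc instead. A persistent-memory style caller sketch
 * (illustrative):
 *
 *	extern void __clean_dcache_area_pop(void *addr, size_t len);
 *
 *	memcpy(pmem_dst, src, len);		// pmem_dst, src, len: assumed
 *	__clean_dcache_area_pop(pmem_dst, len);	// push writes towards media
 */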

/*
 * __dma_flush_area(start, size)
 *
 * clean & invalidate D / U line
 *
 * - start   - virtual start address of region
 * - size    - size in question
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)
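
/*
 * Note: this performs the same clean & invalidate to the PoC as
 * __flush_dcache_area above, but is kept as a separate entry point for the
 * DMA mapping code. Sketch (illustrative):
 *
 *	extern void __dma_flush_area(const void *start, size_t size);
 *
 *	__dma_flush_area(buf, size);		// buf, size: assumed
 */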

/*
 * __dma_map_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 */
ENTRY(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	b	__dma_clean_area
ENDPIPROC(__dma_map_area)
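
/*
 * The direction check above means: DMA_FROM_DEVICE invalidates (the device
 * will write, so stale CPU lines must be discarded), while DMA_TO_DEVICE
 * and DMA_BIDIRECTIONAL clean (CPU writes must reach memory before the
 * device reads). A hypothetical C-side call (illustrative):
 *
 *	__dma_map_area(vaddr, size, DMA_TO_DEVICE);	// clean to PoC
 *	__dma_map_area(vaddr, size, DMA_FROM_DEVICE);	// invalidate
 */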

/*
 * __dma_unmap_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 */
ENTRY(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
ENDPIPROC(__dma_unmap_area)
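
/*
 * Combined behaviour of the two DMA entry points, as implemented above:
 *
 *	direction		__dma_map_area		__dma_unmap_area
 *	DMA_TO_DEVICE		clean			none
 *	DMA_FROM_DEVICE		invalidate		invalidate
 *	DMA_BIDIRECTIONAL	clean			invalidate
 */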