cache-v6.S

/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8
/*
 *	v6_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *	This erratum is present in 1136, 1156 and 1176. It does not affect the
 *	MPCore.
 *
 *	Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
        mov     r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
        mrs     r1, cpsr
        cpsid   ifa                             @ disable interrupts
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate entire I-cache
        msr     cpsr_cx, r1                     @ restore interrupts
        .rept   11                              @ ARM Ltd recommends at least
        nop                                     @ 11 NOPs
        .endr
#else
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I-cache
#endif
        ret     lr
ENDPROC(v6_flush_icache_all)

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
        mov     r0, #0
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 0          @ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
        mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
#else
        b       v6_flush_icache_all
#endif
#else
        mcr     p15, 0, r0, c7, c15, 0          @ Cache clean+invalidate
#endif
        ret     lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
        /*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
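/*
 * Because the cache is VIPT (virtually indexed, physically tagged), its
 * contents stay valid across address-space switches, so no per-mm flushing
 * is required: the _all entry above falls through to the _range entry below,
 * which simply returns.  (Descriptive note; the reasoning is an inference
 * from the VIPT assumption stated above.)
 */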
ENTRY(v6_flush_user_cache_range)
        ret     lr

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
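/*
 * Typical call path (an illustrative C-level sketch assuming the generic ARM
 * cacheflush helpers; not part of this file's interface):
 *
 *	memcpy(code_buf, new_insns, len);	// write instructions
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + len);
 *
 * flush_icache_range() dispatches through the cpu_cache_fns table and ends
 * up in v6_coherent_kern_range below on an ARMv6 CPU.
 */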
ENTRY(v6_coherent_kern_range)
        /* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart)
#ifdef HARVARD_CACHE
        bic     r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(  mcr     p15, 0, r0, c7, c10, 1  )       @ clean D line
        add     r0, r0, #CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
#endif
        mov     r0, #0
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
        mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
#else
        b       v6_flush_icache_all
#endif
#else
        mcr     p15, 0, r0, c7, c5, 6           @ invalidate BTB
#endif
        ret     lr

/*
 * Fault handling for the cache operation above.  If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
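/*
 * Descriptive note: the USER()-wrapped "mcr" above records an __ex_table
 * entry pointing at the 9001 label (see the USER() helper in
 * <asm/assembler.h>), so a fault on an unmapped user address lands here and
 * the routine returns -EFAULT to the caller instead of oopsing.
 */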
9001:
        mov     r0, #-EFAULT
        ret     lr
 UNWIND(.fnend)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
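/*
 * Illustrative use (a sketch assuming the generic __cpuc_flush_dcache_area
 * wrapper from <asm/cacheflush.h>, which dispatches to this routine on
 * ARMv6):
 *
 *	void *kaddr = page_address(page);
 *	__cpuc_flush_dcache_area(kaddr, PAGE_SIZE);	// clean+invalidate one page
 */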
ENTRY(v6_flush_kern_dcache_area)
        add     r1, r0, r1
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
#else
        mcr     p15, 0, r0, c7, c15, 1          @ clean & invalidate unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
#ifdef HARVARD_CACHE
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
#endif
        ret     lr

/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
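/*
 * Implementation notes (descriptive): if start or end is not cache-line
 * aligned, the partially covered lines are cleaned (start) or cleaned and
 * invalidated (end) first, so unrelated data sharing those lines is not
 * discarded; only fully covered lines are simply invalidated.  With
 * CONFIG_DMA_CACHE_RWFO, the "read/write for ownership" accesses pull each
 * line into the local CPU's cache so the maintenance operation takes effect
 * there on SMP parts that do not broadcast cache operations.
 */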
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                        @ read for ownership
        strb    r2, [r0]                        @ write for ownership
#endif
        tst     r0, #D_CACHE_LINE_SIZE - 1
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D line
#else
        mcrne   p15, 0, r0, c7, c11, 1          @ clean unified line
#endif
        tst     r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrneb  r2, [r1, #-1]                   @ read for ownership
        strneb  r2, [r1, #-1]                   @ write for ownership
#endif
        bic     r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D line
#else
        mcrne   p15, 0, r1, c7, c15, 1          @ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D line
#else
        mcr     p15, 0, r0, c7, c7, 1           @ invalidate unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrlo   r2, [r0]                        @ read for ownership
        strlo   r2, [r0]                        @ write for ownership
#endif
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        ret     lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
v6_dma_clean_range:
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
        ldr     r2, [r0]                        @ read for ownership
#endif
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c10, 1          @ clean D line
#else
        mcr     p15, 0, r0, c7, c11, 1          @ clean unified line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        ret     lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                        @ read for ownership
        strb    r2, [r0]                        @ write for ownership
#endif
        bic     r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
#else
        mcr     p15, 0, r0, c7, c15, 1          @ clean & invalidate line
#endif
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
        ldrlob  r2, [r0]                        @ read for ownership
        strlob  r2, [r0]                        @ write for ownership
#endif
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        ret     lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
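/*
 * Direction handling (descriptive sketch; the dma_map_single() call shown is
 * the usual streaming-DMA entry point and is an assumption about the caller,
 * not something defined in this file):
 *
 *	DMA_FROM_DEVICE   -> v6_dma_inv_range   (discard stale CPU data)
 *	DMA_TO_DEVICE     -> v6_dma_clean_range (push CPU data to memory)
 *	DMA_BIDIRECTIONAL -> clean, or clean+invalidate when RWFO is enabled
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	// ... device reads from h ...
 *	dma_unmap_single(dev, h, len, DMA_TO_DEVICE);
 */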
ENTRY(v6_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_FROM_DEVICE
        beq     v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
        b       v6_dma_clean_range
#else
        teq     r2, #DMA_TO_DEVICE
        beq     v6_dma_clean_range
        b       v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        bne     v6_dma_inv_range
#endif
        ret     lr
ENDPROC(v6_dma_unmap_area)

        .globl  v6_flush_kern_cache_louis
        .equ    v6_flush_kern_cache_louis, v6_flush_kern_cache_all

        __INITDATA

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions v6
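/*
 * For reference, define_cache_functions (proc-macros.S) emits a v6_cache_fns
 * table of the entry points in this file.  A rough sketch of the matching
 * C-side structure, with field names as in <asm/cacheflush.h> (treat the
 * exact layout for this kernel version as an assumption):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_icache_all)(void);
 *		void (*flush_kern_all)(void);
 *		void (*flush_kern_louis)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		int  (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_area)(void *, size_t);
 *		void (*dma_map_area)(const void *, size_t, int);
 *		void (*dma_unmap_area)(const void *, size_t, int);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */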