/*
 * linux/arch/arm/mm/cache-v7m.S
 *
 * Based on linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"
/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
        movw    \rt, #:lower16:BASEADDR_V7M_SCB + \reg
        movt    \rt, #:upper16:BASEADDR_V7M_SCB + \reg
        ldr     \rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
        movw\c  \tmp, #:lower16:BASEADDR_V7M_SCB + \op
        movt\c  \tmp, #:upper16:BASEADDR_V7M_SCB + \op
        str\c   \rt, [\tmp]
.endm
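
/*
 * Note: unlike ARMv7-A/R, ARMv7-M has no CP15 coprocessor interface.
 * All cache maintenance is performed by writing the operation's operand
 * (an address or a set/way word) to memory mapped registers in the
 * System Control Block, hence the movw/movt/str sequences above.
 */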
.macro read_ccsidr, rt
        v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
        v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro write_csselr, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC.
 */
.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.macro dcimvac, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
.endm

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction cache line by MVA to PoU
 */
.macro icimvau, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm
/*
 * Invalidate the whole I-cache (ICIALLU also invalidates the branch
 * predictor on UP).
 * rt data is ignored by ICIALLU, so it can be used for the address.
 */
.macro invalidate_icache, rt
        v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
        mov     \rt, #0
.endm

/*
 * Invalidate the BTB (branch target buffer).
 * rt data is ignored by BPIALL, so it can be used for the address.
 */
.macro invalidate_bp, rt
        v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
        mov     \rt, #0
.endm
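
/*
 * v7m_invalidate_l1()
 *
 * Invalidate the L1 data cache by set/way without cleaning it, typically
 * used before the cache is enabled so that stale lines are never written
 * back over RAM.
 *
 * CCSIDR layout (for the cache selected via CSSELR):
 *   [2:0]   LineSize      = log2(words per line) - 2
 *   [12:3]  Associativity = NumWays - 1
 *   [27:13] NumSets       = NumSets - 1
 * Each DCISW operand is (way << WayShift) | (set << SetShift), with the
 * level field left at zero, i.e. level 1.
 */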
ENTRY(v7m_invalidate_l1)
        mov     r0, #0

        write_csselr r0, r1
        read_ccsidr r0

        movw    r1, #0x7fff
        and     r2, r1, r0, lsr #13

        movw    r1, #0x3ff

        and     r3, r1, r0, lsr #3      @ NumWays - 1
        add     r2, r2, #1              @ NumSets

        and     r0, r0, #0x7
        add     r0, r0, #4              @ SetShift

        clz     r1, r3                  @ WayShift
        add     r4, r3, #1              @ NumWays
1:      sub     r2, r2, #1              @ NumSets--
        mov     r3, r4                  @ Temp = NumWays
2:      subs    r3, r3, #1              @ Temp--
        mov     r5, r3, lsl r1
        mov     r6, r2, lsl r0
        orr     r5, r5, r6              @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
        dcisw   r5, r6
        bgt     2b
        cmp     r2, #0
        bgt     1b
        dsb     st
        isb
        ret     lr
ENDPROC(v7m_invalidate_l1)

/*
 * v7m_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 * r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
        invalidate_icache r0
        ret     lr
ENDPROC(v7m_flush_icache_all)

/*
 * v7m_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: r0-r7, r9-r11
 */
ENTRY(v7m_flush_dcache_all)
        dmb                             @ ensure ordering with previous memory accesses
        read_clidr r0
        mov     r3, r0, lsr #23         @ move LoC into position
        ands    r3, r3, #7 << 1         @ extract LoC*2 from clidr
        beq     finished                @ if LoC is 0, there is nothing to clean
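/*
 * r10 holds (cache level << 1) throughout: CSSELR expects the level in
 * bits [3:1] (bit 0 selects the I-side), and CLIDR's LoC was shifted
 * into the same format above so the two can be compared directly.
 */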
start_flush_levels:
        mov     r10, #0                 @ start clean at cache level 0
flush_levels:
        add     r2, r10, r10, lsr #1    @ work out 3x current cache level
        mov     r1, r0, lsr r2          @ extract cache type bits from clidr
        and     r1, r1, #7              @ mask off the bits for the current cache only
        cmp     r1, #2                  @ see what cache we have at this level
        blt     skip                    @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
        save_and_disable_irqs_notrace r9 @ make CSSELR and CCSIDR read atomic
#endif
        write_csselr r10, r1            @ select the current cache level
        isb                             @ sync the new CSSELR and CCSIDR
        read_ccsidr r1                  @ read the new CCSIDR
#ifdef CONFIG_PREEMPT
        restore_irqs_notrace r9
#endif
        and     r2, r1, #7              @ extract the length of the cache lines
        add     r2, r2, #4              @ add 4 (line length offset)
        movw    r4, #0x3ff
        ands    r4, r4, r1, lsr #3      @ maximum way number (Associativity - 1)
        clz     r5, r4                  @ find bit position of way size increment
        movw    r7, #0x7fff
        ands    r7, r7, r1, lsr #13     @ maximum set/index number (NumSets - 1)
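/*
 * loop1/loop2 walk every (way, set) pair for the selected level and
 * issue DCCISW with:
 *   r11 = (way << WayShift) | (set << SetShift) | (level << 1)
 * where WayShift = clz(NumWays - 1) and SetShift = log2(line bytes).
 */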
loop1:
        mov     r9, r7                  @ create working copy of max index
loop2:
        lsl     r6, r4, r5
        orr     r11, r10, r6            @ factor way and cache number into r11
        lsl     r6, r9, r2
        orr     r11, r11, r6            @ factor index number into r11
        dccisw  r11, r6                 @ clean/invalidate by set/way
        subs    r9, r9, #1              @ decrement the index
        bge     loop2
        subs    r4, r4, #1              @ decrement the way
        bge     loop1
skip:
        add     r10, r10, #2            @ increment cache number
        cmp     r3, r10
        bgt     flush_levels
finished:
        mov     r10, #0                 @ switch back to cache level 0
        write_csselr r10, r3            @ select current cache level in CSSELR
        dsb     st
        isb
        ret     lr
ENDPROC(v7m_flush_dcache_all)
/*
 * v7m_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 * The data cache flush is achieved using atomic clean / invalidate
 * operations working outwards from the L1 cache, using Set/Way based
 * cache maintenance operations.
 * The instruction cache can still be invalidated back to the point of
 * unification in a single operation.
 */
ENTRY(v7m_flush_kern_cache_all)
        stmfd   sp!, {r4-r7, r9-r11, lr}
        bl      v7m_flush_dcache_all
        invalidate_icache r0
        ldmfd   sp!, {r4-r7, r9-r11, lr}
        ret     lr
ENDPROC(v7m_flush_kern_cache_all)

/*
 * v7m_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space.
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v7m_flush_user_cache_all)
        /*FALLTHROUGH*/
/*
 * v7m_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
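/*
 * Both entry points below are intentionally empty: ARMv7-M has no MMU,
 * so user mappings are not distinct from kernel ones and there is
 * nothing address-space specific to flush here.
 */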
ENTRY(v7m_flush_user_cache_range)
        ret     lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)

/*
 * v7m_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * v7m_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
1:
/*
 * We use the open coded version of dccmvau, otherwise USER() would
 * point at the movw instruction.
 */
        dccmvau r12, r3
        add     r12, r12, r2
        cmp     r12, r1
        blo     1b
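/*
 * The D-side clean to the PoU must be complete and visible before the
 * I-side lines are invalidated below, hence the dsb between the loops.
 */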
        dsb     ishst
        icache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
2:
        icimvau r12, r3
        add     r12, r12, r2
        cmp     r12, r1
        blo     2b
        invalidate_bp r0
        dsb     ishst
        isb
        ret     lr
 UNWIND(.fnend)
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)
/*
 * v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the region at addr is written back
 * to memory.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v7m_flush_kern_dcache_area)
        dcache_line_size r2, r3
        add     r1, r0, r1
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccimvac r0, r3                 @ clean & invalidate D line / unified line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_flush_kern_dcache_area)
/*
 * v7m_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
v7m_dma_inv_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        tst     r0, r3
        bic     r0, r0, r3
        dccimvacne r0, r3
        subne   r3, r2, #1              @ restore r3, corrupted by v7m's dccimvac
        tst     r1, r3
        bic     r1, r1, r3
        dccimvacne r1, r3
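/*
 * Any partially covered line at either end of the buffer was cleaned
 * and invalidated above so that unrelated dirty data sharing that line
 * is not discarded; the fully covered lines can simply be invalidated.
 */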
1:
        dcimvac r0, r3
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_inv_range)

/*
 * v7m_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
v7m_dma_clean_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccmvac r0, r3                  @ clean D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_clean_range)

/*
 * v7m_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(v7m_dma_flush_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccimvac r0, r3                 @ clean & invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
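/*
 * DMA_FROM_DEVICE: the device will write to memory, so stale cache
 * lines only need to be invalidated. For any other direction, dirty
 * lines are cleaned to the PoC so the device sees up-to-date data.
 */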
ENTRY(v7m_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_FROM_DEVICE
        beq     v7m_dma_inv_range
        b       v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
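/*
 * On unmap, only non DMA_TO_DEVICE buffers need work: lines that may
 * have been speculatively fetched during the transfer are invalidated
 * so the CPU reads the data the device wrote.
 */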
ENTRY(v7m_dma_unmap_area)
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        bne     v7m_dma_inv_range
        ret     lr
ENDPROC(v7m_dma_unmap_area)
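
/*
 * Flushing to the Level of Unification Inner Shareable is simply
 * aliased to the full flush here; v7M is uniprocessor only, so there
 * is no cheaper LoUIS-only variant worth implementing.
 */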
        .globl  v7m_flush_kern_cache_louis
        .equ    v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all

        __INITDATA

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions v7m