/*
 * linux/arch/arm/mm/cache-v7m.S
 *
 * Based on linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"
/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
	ldr	\rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
	str\c	\rt, [\tmp]
.endm
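
/*
 * Unlike ARMv7-A/R, where cache maintenance is issued through CP15
 * coprocessor instructions, ARMv7-M exposes the equivalent operations as
 * memory mapped registers in the System Control Block, so every cache
 * maintenance op below is simply a store of its operand (an address or a
 * set/way value) to BASEADDR_V7M_SCB plus the register offset.
 */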
.macro read_ccsidr, rt
	v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
	v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro write_csselr, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC
 */
.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
 * rt data ignored by ICIALLU(IS), so can be used for the address
 */
.macro invalidate_icache, rt
	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
	mov \rt, #0
.endm

/*
 * Invalidate the BTB, inner shareable if SMP.
 * rt data ignored by BPIALL, so it can be used for the address
 */
.macro invalidate_bp, rt
	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
	mov \rt, #0
.endm
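
/*
 * The set/way operand passed to DCISW/DCCISW selects one line of the
 * cache level currently programmed into CSSELR:
 *
 *	way number in bits [31:32-A]	(A = log2(number of ways))
 *	set number in bits [B-1:L]	(L = log2(line length in bytes),
 *					 B = L + log2(number of sets))
 *
 * so the routines below derive SetShift from the line size field of
 * CCSIDR and WayShift from clz(NumWays - 1).
 */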
ENTRY(v7m_invalidate_l1)
	mov	r0, #0

	write_csselr r0, r1
	read_ccsidr r0

	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13

	movw	r1, #0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7
	add	r0, r0, #4		@ SetShift

	clz	r1, r3			@ WayShift
	add	r4, r3, #1		@ NumWays
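	/*
	 * Example: a 16 KB, 4-way cache with 32-byte lines reads back
	 * NumSets-1 = 127, NumWays-1 = 3 and LineSize = 1 from CCSIDR,
	 * giving SetShift = 5 and WayShift = 30, so each DCISW operand
	 * below is (way << 30) | (set << 5).
	 */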
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	dcisw	r5, r6
	bgt	2b
	cmp	r2, #0
	bgt	1b
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_invalidate_l1)
/*
 * v7m_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 * r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
	invalidate_icache r0
	ret	lr
ENDPROC(v7m_flush_icache_all)
/*
 * v7m_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: r0-r7, r9-r11
 */
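/*
 * The cache hierarchy is walked from level 0 up to the Level of
 * Coherency: CLIDR carries LoC in bits [26:24] and a 3-bit cache type
 * field per level starting at bit 0 (hence the "3x cache level" index
 * below); levels reporting no cache or an I-cache only (Ctype < 2) are
 * skipped.
 */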
ENTRY(v7m_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	read_clidr r0
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	write_csselr r10, r1			@ set current cache level
	isb					@ isb to sync the new cssr&csidr
	read_ccsidr r1				@ read the new csidr
#ifdef CONFIG_PREEMPT
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum way number (NumWays - 1)
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract maximum index number (NumSets - 1)
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	lsl	r6, r4, r5
	orr	r11, r10, r6			@ factor way and cache number into r11
	lsl	r6, r9, r2
	orr	r11, r11, r6			@ factor index number into r11
	dccisw	r11, r6				@ clean/invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	write_csselr r10, r3			@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_flush_dcache_all)
/*
 * v7m_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 * The data cache flush is now achieved using atomic clean / invalidates
 * working outwards from L1 cache. This is done using Set/Way based cache
 * maintenance instructions.
 * The instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
ENTRY(v7m_flush_kern_cache_all)
	stmfd	sp!, {r4-r7, r9-r11, lr}
	bl	v7m_flush_dcache_all
	invalidate_icache r0
	ldmfd	sp!, {r4-r7, r9-r11, lr}
	ret	lr
ENDPROC(v7m_flush_kern_cache_all)
/*
 * v7m_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space
 *
 * - mm	- mm_struct describing address space
 */
ENTRY(v7m_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 * v7m_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
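/*
 * ARMv7-M has no MMU, so there are no separate user address spaces to
 * maintain here; both entry points simply return.
 */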
ENTRY(v7m_flush_user_cache_range)
	ret	lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)
/*
 * v7m_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * v7m_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
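/*
 * The sequence below is ordered deliberately: the D-cache lines are
 * cleaned to the point of unification first, the dsb guarantees those
 * writes are visible before the I-cache lines for the range and the
 * branch predictor are invalidated, and the final isb flushes the
 * pipeline so the newly written instructions are fetched.
 */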
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart	)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
1:
/*
 * We use open coded version of dccmvau otherwise USER() would
 * point at movw instruction.
 */
	dccmvau r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
	icimvau r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	invalidate_bp r0
	dsb	ishst
	isb
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)
/*
 * v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v7m_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_flush_kern_dcache_area)
/*
 * v7m_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
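/*
 * Lines which are only partially covered at either end of the range are
 * cleaned and invalidated (DCCIMVAC) rather than simply invalidated, so
 * that unrelated data sharing those cache lines is not thrown away.
 */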
v7m_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
	dccimvacne r0, r3
	addne	r0, r0, r2
	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
	tst	r1, r3
	bic	r1, r1, r3
	dccimvacne r1, r3
	cmp	r0, r1
1:
	dcimvaclo r0, r3
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_inv_range)
/*
 * v7m_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
v7m_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccmvac r0, r3		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_clean_range)

/*
 * v7m_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(v7m_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_flush_range)
/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
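/*
 * For DMA_FROM_DEVICE the buffer is invalidated (the device is about to
 * write it); for DMA_TO_DEVICE and DMA_BIDIRECTIONAL it is cleaned so
 * that the current contents reach memory before the device reads them.
 */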
ENTRY(v7m_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7m_dma_inv_range
	b	v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
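/*
 * On unmap everything except DMA_TO_DEVICE is invalidated again, so the
 * CPU does not see stale lines that may have been allocated while the
 * device was writing the buffer.
 */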
ENTRY(v7m_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7m_dma_inv_range
	ret	lr
ENDPROC(v7m_dma_unmap_area)
	.globl	v7m_flush_kern_cache_louis
	.equ	v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7m
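	@ define_cache_functions (from proc-macros.S) expands to a
	@ v7m_cache_fns table of pointers to the entry points above, which
	@ the generic ARM cache handling code can dispatch through.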