proc-arm940.S
/*
 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64
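
/*
 * The index-based cache maintenance operations used below take, per the
 * ARM940T TRM, the entry number in bits [31:26] of Rd and the segment in
 * bits [5:4], which is why the loops step r3 by 1 << 26 and r1 by 1 << 4.
 */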

	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	ret	lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm940_reset)
	.popsection

/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm940_flush_icache_all)

/*
 * flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush a range of cache entries in
 * the specified address range, so the entire cache is flushed.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range.  Thus, invalidates all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range.  Thus, cleans all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
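/*
 * The dma_data_direction values order DMA_BIDIRECTIONAL < DMA_TO_DEVICE <
 * DMA_FROM_DEVICE, so a single compare against DMA_TO_DEVICE selects
 * clean (to device), invalidate (from device) or clean+invalidate
 * (bidirectional) below.
 */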
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
ENDPROC(arm940_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	ret	lr
ENDPROC(arm940_dma_unmap_area)
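
/*
 * The ARM940T has only one cache level, so flushing to the Level of
 * Unification (Inner Shareable) is simply the full cache flush.
 */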
	.globl	arm940_flush_kern_cache_louis
	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm940

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1
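
	/*
	 * Protection region registers on the ARM940T take the region
	 * base in bits [31:12], a size field in bits [5:1] and an
	 * enable bit in bit 0; the pr_val helper macro packs base,
	 * size and enable into that format for the RAM and FLASH
	 * areas below.
	 */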
	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000)	@ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12		@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0			@ set area 1, RAM
	mcr	p15, 0, r3, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE			@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r7, #1				@ use r7 (r6 was never loaded)
	mcr	p15, 0, r3, c6, c2, 0			@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1
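
	/*
	 * Return the desired control register value in r0; the startup
	 * code that called __arm940_setup is expected to write it to
	 * CP15 c1 to enable the MPU and the caches.
	 */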
	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache
	ret	lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", #alloc

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	initfn	__arm940_setup, __arm940_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info