/* cpu_setup_fsl_booke.S */
/*
 * This file contains low level CPU setup functions.
 * Kumar Gala <galak@kernel.crashing.org>
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>
  22. _GLOBAL(__e500_icache_setup)
  23. mfspr r0, SPRN_L1CSR1
  24. andi. r3, r0, L1CSR1_ICE
  25. bnelr /* Already enabled */
  26. oris r0, r0, L1CSR1_CPE@h
  27. ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
  28. mtspr SPRN_L1CSR1, r0 /* Enable I-Cache */
  29. isync
  30. blr
  31. _GLOBAL(__e500_dcache_setup)
  32. mfspr r0, SPRN_L1CSR0
  33. andi. r3, r0, L1CSR0_DCE
  34. bnelr /* Already enabled */
  35. msync
  36. isync
  37. li r0, 0
  38. mtspr SPRN_L1CSR0, r0 /* Disable */
  39. msync
  40. isync
  41. li r0, (L1CSR0_DCFI | L1CSR0_CLFC)
  42. mtspr SPRN_L1CSR0, r0 /* Invalidate */
  43. isync
  44. 1: mfspr r0, SPRN_L1CSR0
  45. andi. r3, r0, L1CSR0_CLFC
  46. bne+ 1b /* Wait for lock bits reset */
  47. oris r0, r0, L1CSR0_CPE@h
  48. ori r0, r0, L1CSR0_DCE
  49. msync
  50. isync
  51. mtspr SPRN_L1CSR0, r0 /* Enable */
  52. isync
  53. blr
  54. /*
  55. * FIXME - we haven't yet done testing to determine a reasonable default
  56. * value for PW20_WAIT_IDLE_BIT.
  57. */
  58. #define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */
  59. _GLOBAL(setup_pw20_idle)
  60. mfspr r3, SPRN_PWRMGTCR0
  61. /* Set PW20_WAIT bit, enable pw20 state*/
  62. ori r3, r3, PWRMGTCR0_PW20_WAIT
  63. li r11, PW20_WAIT_IDLE_BIT
  64. /* Set Automatic PW20 Core Idle Count */
  65. rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT
  66. mtspr SPRN_PWRMGTCR0, r3
  67. blr
  68. /*
  69. * FIXME - we haven't yet done testing to determine a reasonable default
  70. * value for AV_WAIT_IDLE_BIT.
  71. */
  72. #define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */
  73. _GLOBAL(setup_altivec_idle)
  74. mfspr r3, SPRN_PWRMGTCR0
  75. /* Enable Altivec Idle */
  76. oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
  77. li r11, AV_WAIT_IDLE_BIT
  78. /* Set Automatic AltiVec Idle Count */
  79. rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT
  80. mtspr SPRN_PWRMGTCR0, r3
  81. blr
  82. #ifdef CONFIG_PPC_E500MC
  83. _GLOBAL(__setup_cpu_e6500)
  84. mflr r6
  85. #ifdef CONFIG_PPC64
  86. bl setup_altivec_ivors
  87. /* Touch IVOR42 only if the CPU supports E.HV category */
  88. mfspr r10,SPRN_MMUCFG
  89. rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
  90. beq 1f
  91. bl setup_lrat_ivor
  92. 1:
  93. #endif
  94. bl setup_pw20_idle
  95. bl setup_altivec_idle
  96. bl __setup_cpu_e5500
  97. mtlr r6
  98. blr
  99. #endif /* CONFIG_PPC_E500MC */
  100. #ifdef CONFIG_PPC32
  101. #ifdef CONFIG_E200
  102. _GLOBAL(__setup_cpu_e200)
  103. /* enable dedicated debug exception handling resources (Debug APU) */
  104. mfspr r3,SPRN_HID0
  105. ori r3,r3,HID0_DAPUEN@l
  106. mtspr SPRN_HID0,r3
  107. b __setup_e200_ivors
  108. #endif /* CONFIG_E200 */
  109. #ifdef CONFIG_E500
  110. #ifndef CONFIG_PPC_E500MC
  111. _GLOBAL(__setup_cpu_e500v1)
  112. _GLOBAL(__setup_cpu_e500v2)
  113. mflr r4
  114. bl __e500_icache_setup
  115. bl __e500_dcache_setup
  116. bl __setup_e500_ivors
  117. #if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
  118. /* Ensure that RFXE is set */
  119. mfspr r3,SPRN_HID1
  120. oris r3,r3,HID1_RFXE@h
  121. mtspr SPRN_HID1,r3
  122. #endif
  123. mtlr r4
  124. blr
  125. #else /* CONFIG_PPC_E500MC */
  126. _GLOBAL(__setup_cpu_e500mc)
  127. _GLOBAL(__setup_cpu_e5500)
  128. mflr r5
  129. bl __e500_icache_setup
  130. bl __e500_dcache_setup
  131. bl __setup_e500mc_ivors
  132. /*
  133. * We only want to touch IVOR38-41 if we're running on hardware
  134. * that supports category E.HV. The architectural way to determine
  135. * this is MMUCFG[LPIDSIZE].
  136. */
  137. mfspr r3, SPRN_MMUCFG
  138. rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
  139. beq 1f
  140. bl __setup_ehv_ivors
  141. b 2f
  142. 1:
  143. lwz r3, CPU_SPEC_FEATURES(r4)
  144. /* We need this check as cpu_setup is also called for
  145. * the secondary cores. So, if we have already cleared
  146. * the feature on the primary core, avoid doing it on the
  147. * secondary core.
  148. */
  149. andis. r6, r3, CPU_FTR_EMB_HV@h
  150. beq 2f
  151. rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
  152. stw r3, CPU_SPEC_FEATURES(r4)
  153. 2:
  154. mtlr r5
  155. blr
  156. #endif /* CONFIG_PPC_E500MC */
  157. #endif /* CONFIG_E500 */
  158. #endif /* CONFIG_PPC32 */
  159. #ifdef CONFIG_PPC_BOOK3E_64
  160. _GLOBAL(__restore_cpu_e6500)
  161. mflr r5
  162. bl setup_altivec_ivors
  163. /* Touch IVOR42 only if the CPU supports E.HV category */
  164. mfspr r10,SPRN_MMUCFG
  165. rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
  166. beq 1f
  167. bl setup_lrat_ivor
  168. 1:
  169. bl setup_pw20_idle
  170. bl setup_altivec_idle
  171. bl __restore_cpu_e5500
  172. mtlr r5
  173. blr
  174. _GLOBAL(__restore_cpu_e5500)
  175. mflr r4
  176. bl __e500_icache_setup
  177. bl __e500_dcache_setup
  178. bl __setup_base_ivors
  179. bl setup_perfmon_ivor
  180. bl setup_doorbell_ivors
  181. /*
  182. * We only want to touch IVOR38-41 if we're running on hardware
  183. * that supports category E.HV. The architectural way to determine
  184. * this is MMUCFG[LPIDSIZE].
  185. */
  186. mfspr r10,SPRN_MMUCFG
  187. rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
  188. beq 1f
  189. bl setup_ehv_ivors
  190. 1:
  191. mtlr r4
  192. blr
  193. _GLOBAL(__setup_cpu_e5500)
  194. mflr r5
  195. bl __e500_icache_setup
  196. bl __e500_dcache_setup
  197. bl __setup_base_ivors
  198. bl setup_perfmon_ivor
  199. bl setup_doorbell_ivors
  200. /*
  201. * We only want to touch IVOR38-41 if we're running on hardware
  202. * that supports category E.HV. The architectural way to determine
  203. * this is MMUCFG[LPIDSIZE].
  204. */
  205. mfspr r10,SPRN_MMUCFG
  206. rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
  207. beq 1f
  208. bl setup_ehv_ivors
  209. b 2f
  210. 1:
  211. ld r10,CPU_SPEC_FEATURES(r4)
  212. LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
  213. andc r10,r10,r9
  214. std r10,CPU_SPEC_FEATURES(r4)
  215. 2:
  216. mtlr r5
  217. blr
  218. #endif
  219. /* flush L1 date cache, it can apply to e500v2, e500mc and e5500 */
  220. _GLOBAL(flush_dcache_L1)
  221. mfmsr r10
  222. wrteei 0
  223. mfspr r3,SPRN_L1CFG0
  224. rlwinm r5,r3,9,3 /* Extract cache block size */
  225. twlgti r5,1 /* Only 32 and 64 byte cache blocks
  226. * are currently defined.
  227. */
  228. li r4,32
  229. subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
  230. * log2(number of ways)
  231. */
  232. slw r5,r4,r5 /* r5 = cache block size */
  233. rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
  234. mulli r7,r7,13 /* An 8-way cache will require 13
  235. * loads per set.
  236. */
  237. slw r7,r7,r6
  238. /* save off HID0 and set DCFA */
  239. mfspr r8,SPRN_HID0
  240. ori r9,r8,HID0_DCFA@l
  241. mtspr SPRN_HID0,r9
  242. isync
  243. LOAD_REG_IMMEDIATE(r6, KERNELBASE)
  244. mr r4, r6
  245. mtctr r7
  246. 1: lwz r3,0(r4) /* Load... */
  247. add r4,r4,r5
  248. bdnz 1b
  249. msync
  250. mr r4, r6
  251. mtctr r7
  252. 1: dcbf 0,r4 /* ...and flush. */
  253. add r4,r4,r5
  254. bdnz 1b
  255. /* restore HID0 */
  256. mtspr SPRN_HID0,r8
  257. isync
  258. wrtee r10
  259. blr
  260. has_L2_cache:
  261. /* skip L2 cache on P2040/P2040E as they have no L2 cache */
  262. mfspr r3, SPRN_SVR
  263. /* shift right by 8 bits and clear E bit of SVR */
  264. rlwinm r4, r3, 24, ~0x800
  265. lis r3, SVR_P2040@h
  266. ori r3, r3, SVR_P2040@l
  267. cmpw r4, r3
  268. beq 1f
  269. li r3, 1
  270. blr
  271. 1:
  272. li r3, 0
  273. blr
  274. /* flush backside L2 cache */
  275. flush_backside_L2_cache:
  276. mflr r10
  277. bl has_L2_cache
  278. mtlr r10
  279. cmpwi r3, 0
  280. beq 2f
  281. /* Flush the L2 cache */
  282. mfspr r3, SPRN_L2CSR0
  283. ori r3, r3, L2CSR0_L2FL@l
  284. msync
  285. isync
  286. mtspr SPRN_L2CSR0,r3
  287. isync
  288. /* check if it is complete */
  289. 1: mfspr r3,SPRN_L2CSR0
  290. andi. r3, r3, L2CSR0_L2FL@l
  291. bne 1b
  292. 2:
  293. blr
  294. _GLOBAL(cpu_down_flush_e500v2)
  295. mflr r0
  296. bl flush_dcache_L1
  297. mtlr r0
  298. blr
  299. _GLOBAL(cpu_down_flush_e500mc)
  300. _GLOBAL(cpu_down_flush_e5500)
  301. mflr r0
  302. bl flush_dcache_L1
  303. bl flush_backside_L2_cache
  304. mtlr r0
  305. blr
  306. /* L1 Data Cache of e6500 contains no modified data, no flush is required */
  307. _GLOBAL(cpu_down_flush_e6500)
  308. blr