/*
 * sh7372 lowlevel sleep code for "Core Standby Mode"
 *
 * Copyright (C) 2011 Magnus Damm
 *
 * In "Core Standby Mode" the ARM core is off, but L2 cache is still on
 *
 * Based on mach-omap2/sleep34xx.S
 *
 * (C) Copyright 2007 Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004 Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

#define SMFRAM 0xe6a70000
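
/*
 * SMFRAM (0xe6a70000) is assumed to be on-chip memory that keeps its
 * contents while the ARM core is powered down in Core Standby Mode.
 * The suspend path below stores, in order: sp, spsr, lr; CPACR, TTBR0,
 * TTBR1, TTBCR; DACR, PRRR, NMRR; Context ID, TPIDRURW, VBAR, cpsr;
 * and finally SCTLR. The resume path reads the words back in the same
 * order, then appends three words of page table fixup state.
 */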
	.align
kernel_flush:
	.word	v7_flush_dcache_all

	.align	3
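/*
 * sh7372_cpu_suspend: save CPU context to SMFRAM, clean and disable
 * the L1 data cache, then enter low power with WFI. On a shallow
 * wakeup execution falls through to the code after WFI; after a full
 * Core Standby cycle the CPU is expected to restart at
 * sh7372_cpu_resume instead.
 */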
ENTRY(sh7372_cpu_suspend)
	stmfd	sp!, {r0-r12, lr}	@ save registers on stack

	ldr	r8, =SMFRAM

	mov	r4, sp			@ Store sp
	mrs	r5, spsr		@ Store spsr
	mov	r6, lr			@ Store lr
	stmia	r8!, {r4-r6}

	mrc	p15, 0, r4, c1, c0, 2	@ Coprocessor access control register
	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
	stmia	r8!, {r4-r7}

	mrc	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
	stmia	r8!, {r4-r6}

	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
	mrs	r7, cpsr		@ Store current cpsr
	stmia	r8!, {r4-r7}

	mrc	p15, 0, r4, c1, c0, 0	@ save control register
	stmia	r8!, {r4}
	/*
	 * jump out to kernel flush routine
	 *  - reusing that code is better
	 *  - it executes in a cached space so is faster than refetch per-block
	 *  - should be faster and will change with kernel
	 *  - 'might' have to copy address, load and jump to it
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	mov	lr, pc
	bx	r1
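	/* mov lr, pc sets the return address so bx r1 acts as a call */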
	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Once SCTLR.C is clear, all data accesses are
	 * treated as strongly ordered and no longer hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb
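	/*
	 * The isb ensures the SCTLR write has taken effect before any
	 * following loads or stores execute; from here until resume,
	 * data accesses bypass the L1 cache.
	 */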
	/*
	 * Invalidate the L1 data cache. Even though only an invalidate
	 * is necessary, the exported flush API is used here; a clean of
	 * an already clean cache is almost a NOP.
	 */
	ldr	r1, kernel_flush
	blx	r1
	/*
	 * The kernel doesn't interwork: v7_flush_dcache_all in particular
	 * will always return in Thumb state when CONFIG_THUMB2_KERNEL is
	 * enabled. This sequence switches back to ARM. Note that .align
	 * may insert a nop: bx pc needs to be word-aligned in order to
	 * work.
	 */
THUMB(	.thumb		)
THUMB(	.align		)
THUMB(	bx	pc	)
THUMB(	nop		)
	.arm
	/* Data memory barrier and Data sync barrier */
	dsb
	dmb
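	/*
	 * The barriers make sure all outstanding memory accesses have
	 * completed before the core executes WFI and potentially loses
	 * power.
	 */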
/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt
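	/*
	 * If the power down is denied (for example because an interrupt
	 * is already pending), WFI returns immediately and execution
	 * falls through to the resume path below. After a real Core
	 * Standby cycle the CPU is expected to come back up at
	 * sh7372_cpu_resume instead and never reach this point.
	 */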
/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check whether the C bit is set
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
	mcreq	p15, 0, r0, c1, c0, 0
	isb
/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return

	.pool
	.align	12
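/*
 * The .align 12 above places the resume code on a 4 KB boundary,
 * presumably so that its physical address can be programmed into the
 * hardware register that holds the reset/resume vector.
 */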
	.text
	.global	sh7372_cpu_resume
sh7372_cpu_resume:
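	/*
	 * Entered with the MMU off (it is re-enabled below), so the CPU
	 * is running from physical addresses and everything up to the
	 * bx into restoremmu_on must be position independent.
	 */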
	mov	r1, #0
	/*
	 * Invalidate all instruction caches to PoU
	 * and flush branch target cache
	 */
	mcr	p15, 0, r1, c7, c5, 0

	ldr	r3, =SMFRAM

	ldmia	r3!, {r4-r6}
	mov	sp, r4			@ Restore sp
	msr	spsr_cxsf, r5		@ Restore spsr
	mov	lr, r6			@ Restore lr
	ldmia	r3!, {r4-r7}
	mcr	p15, 0, r4, c1, c0, 2	@ Coprocessor access Control Register
	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR

	ldmia	r3!, {r4-r6}
	mcr	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
	mcr	p15, 0, r6, c10, c2, 1	@ NMRR

	ldmia	r3!, {r4-r7}
	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
	mcr	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
	msr	cpsr, r7		@ Restore cpsr
	/* Starting to enable MMU here */
	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBCR
	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
	and	r7, #0x7
	cmp	r7, #0x0
	beq	usettbr0
ttbr_error:
	/*
	 * More work needs to be done to support an N[0:2] value other
	 * than 0, so loop here so that the error can be detected.
	 */
	b	ttbr_error
	.align
cache_pred_disable_mask:
	.word	0xFFFFE7FB
ttbrbit_mask:
	.word	0xFFFFC000
table_index_mask:
	.word	0xFFF00000
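/*
 * 0x00000C02 below is a first-level section descriptor: bits [1:0] = 10
 * mark a 1 MB section, AP bits [11:10] = 11 allow full access, and the
 * C and B bits are clear, so the temporary mapping is non-cacheable.
 */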
table_entry:
	.word	0x00000C02
usettbr0:
	mrc	p15, 0, r2, c2, c0, 0
	ldr	r5, ttbrbit_mask
	and	r2, r5
	mov	r4, pc
	ldr	r5, table_index_mask
	and	r4, r5			@ r4 = bits 31 to 20 of pc
	/* Extract the value to be written to the table entry */
	ldr	r6, table_entry
	/* r6 has the value to be written to the table entry */
	add	r6, r6, r4
	/* Get the address of the table entry to modify */
	lsr	r4, #18
	/* r2 has the location which needs to be modified */
	add	r2, r4
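	/*
	 * To spell out the arithmetic: (pc & 0xFFF00000) >> 18 equals
	 * (pc >> 20) << 2, i.e. the section index times 4 bytes per
	 * descriptor, so r2 = TTBR0 base + index * 4 now points at the
	 * first-level descriptor covering the 1 MB section this code
	 * runs in, and r6 holds an identity-mapped section entry for it.
	 */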
	ldr	r4, [r2]
	str	r6, [r2]		/* modify the table entry */

	mov	r7, r6
	mov	r5, r2
	mov	r6, r4
	/* r5 = address of the modified page table entry */
	/* r6 = original page table entry */

	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
	/*
	 * Restore the control register. This enables the MMU.
	 * The caches and prediction are not enabled here; they
	 * will be enabled after restoring the MMU table entry.
	 */
	ldmia	r3!, {r4}
	stmia	r3!, {r5}	/* save address of the modified page table entry */
	stmia	r3!, {r6}	/* save original page table entry */
	stmia	r3!, {r7}	/* save modified page table entry */
	ldr	r2, cache_pred_disable_mask
	and	r4, r2
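	/*
	 * cache_pred_disable_mask (0xFFFFE7FB) clears SCTLR.I (bit 12),
	 * SCTLR.Z (bit 11) and SCTLR.C (bit 2), so the write below turns
	 * the MMU back on while leaving the caches and branch prediction
	 * disabled.
	 */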
	mcr	p15, 0, r4, c1, c0, 0
	dsb
	isb
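	/*
	 * The MMU is on again. The identity-mapped section installed
	 * above covers the physical addresses this code is currently
	 * executing from; the absolute load and bx below then move the
	 * pc to the kernel virtual address of restoremmu_on.
	 */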
	ldr	r0, =restoremmu_on
	bx	r0
/*
 * ==============================
 * == Exit point from OFF mode ==
 * ==============================
 */
restoremmu_on:
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return