
/*
 * arch/xtensa/include/asm/initialize_mmu.h
 *
 * Initializes MMU:
 *
 *	For the new V3 MMU we remap the TLB from virtual == physical
 *	to the standard Linux mapping used in earlier MMUs.
 *
 *	With the new MMU we also support a new configuration register that
 *	specifies how the S32C1I instruction operates with the cache
 *	controller.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2008 - 2012 Tensilica, Inc.
 *
 *   Marc Gauthier <marc@tensilica.com>
 *   Pete Delaney <piet@tensilica.com>
 */
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

#include <asm/pgtable.h>
#include <asm/vectors.h>

#if XCHAL_HAVE_PTP_MMU
#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK	(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#else
#define CA_WRITEBACK	(0x4)
#endif

#ifndef XCHAL_SPANNING_WAY
#define XCHAL_SPANNING_WAY 0
#endif
#ifdef __ASSEMBLY__

#define XTENSA_HWVERSION_RC_2009_0 230000

	.macro	initialize_mmu
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
 * We have the Atomic Operation Control (ATOMCTL) register; initialize it.
 * For details see Documentation/xtensa/atomctl.txt
 */
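/*
 * Sketch of the field layout, as described in atomctl.txt: ATOMCTL has
 * three 2-bit fields -- WB (bits 5:4), WT (bits 3:2) and BY (bits 1:0) --
 * selecting how S32C1I is handled for writeback, writethrough and bypass
 * memory: 0 = exception, 1 = RCW transaction, 2 = internal operation.
 * On that reading, 0x25 below means WB internal with WT/BY as RCW, and
 * 0x29 means WB/WT internal with BY as RCW.
 */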
#if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
				 * RCW otherwise
				 */
#else
	movi	a3, 0x29	/* non-MX -- most cores use standard memory
				 * controllers which usually can't use RCW
				 */
#endif
	wsr	a3, atomctl
#endif  /* XCHAL_HAVE_S32C1I &&
	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
	 */
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
 * Have MMU v3
 */
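/*
 * At reset an MMU v3 core comes up with the identity map: the TLB
 * spanning way covers all 4 GB with 512 MB pages, virtual == physical,
 * caches bypassed (see Documentation/xtensa/mmu.txt).  The sequence
 * below rebuilds the standard Linux layout (KSEG/KIO) without ever
 * letting the PC run on an unmapped address.
 */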
#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif
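	/*
	 * call0 deposits its return address in a0, which lets this
	 * position-independent code discover the address it is actually
	 * executing from; the mappings below are computed relative to a0.
	 */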
	movi	a1, 0
	_call0	1f
	_j	2f

	.align	4
1:
#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
#define TEMP_MAPPING_VADDR 0x40000000
#else
#define TEMP_MAPPING_VADDR 0x00000000
#endif
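/*
 * The temporary mapping must not overlap the 512 MB region the kernel
 * was loaded into, so use vaddr 0x40000000 unless the kernel itself
 * lives there, in which case vaddr 0 is free.
 */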
	/* Step 1: invalidate the 512 MB spanning-way entry at
	 * TEMP_MAPPING_VADDR.
	 */

	movi	a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
	idtlb	a2
	iitlb	a2
	isync
	/* Step 2: map the 128 MB page at TEMP_MAPPING_VADDR to the paddr
	 * containing this code and jump to the new mapping.
	 */
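	/*
	 * a3 = a0 rounded down to a 128 MB boundary, tagged CA_BYPASS;
	 * a7 = TEMP_MAPPING_VADDR | way 5 (the 128 MB page way).  The
	 * slli/srli pair then isolates a0's offset within that 128 MB
	 * page, rebases it onto TEMP_MAPPING_VADDR, and jx resumes the
	 * same instruction stream through the temporary mapping.
	 */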
	srli	a3, a0, 27
	slli	a3, a3, 27
	addi	a3, a3, CA_BYPASS
	addi	a7, a2, 5 - XCHAL_SPANNING_WAY
	wdtlb	a3, a7
	witlb	a3, a7
	isync

	slli	a4, a0, 5
	srli	a4, a4, 5
	addi	a5, a2, -XCHAL_SPANNING_WAY
	add	a4, a4, a5
	jx	a4
	/* Step 3: unmap everything other than the current area.
	 *	   Start 512 MB above the temporary mapping, wrap around,
	 *	   and stop upon reaching the temporary mapping again,
	 *	   leaving it intact.
	 */
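	/*
	 * a4 is the 512 MB stride; a5 walks the seven remaining
	 * spanning-way entries, relying on 32-bit wraparound, until it
	 * comes back to a2 (the still-needed temporary mapping).
	 */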
2:	movi	a4, 0x20000000
	add	a5, a2, a4
3:	idtlb	a5
	iitlb	a5
	add	a5, a5, a4
	bne	a5, a2, 3b
	/* Step 4: Setup MMU with the requested static mappings. */
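	/*
	 * Per the Xtensa MMU notes (Documentation/xtensa/mmu.txt),
	 * writing 0x01000000 to ITLBCFG/DTLBCFG programs the page-size
	 * field of TLB way 6 for 256 MB pages (used by the KIO mappings
	 * below), while way 5 keeps its 128 MB pages for KSEG.
	 */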
	movi	a6, 0x01000000
	wsr	a6, ITLBCFG
	wsr	a6, DTLBCFG
	isync

	movi	a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5
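	/*
	 * KSEG physical memory is thus mapped twice: once cached
	 * (writeback) at XCHAL_KSEG_CACHED_VADDR and once uncached at
	 * XCHAL_KSEG_BYPASS_VADDR, both backed by XCHAL_KSEG_PADDR.
	 */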
#ifdef CONFIG_XTENSA_KSEG_512M
	movi	a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5
#endif
	movi	a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

	isync
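	/*
	 * Likewise, the KIO region gets a cached and a bypass view of
	 * XCHAL_KIO_DEFAULT_PADDR through way 6, so later code can pick
	 * whichever view is appropriate for device registers.
	 */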
	/* Jump to self, using final mappings. */
	movi	a4, 1f
	jx	a4

1:
	/* Step 5: remove temporary mapping. */
	idtlb	a7
	iitlb	a7
	isync
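	/*
	 * PTEVADDR is the virtual base used by the TLB-miss handlers to
	 * locate the page tables; clear it here so the register starts
	 * in a known state.
	 */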
	movi	a0, 0
	wsr	a0, ptevaddr
	rsync
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
	  XCHAL_HAVE_SPANNING_WAY */
#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \
		(XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE)
	/* Enable data and instruction cache in the DEFAULT_MEMORY region
	 * if the processor has DTLB and ITLB.
	 */
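	/*
	 * a5 walks the region one 512 MB spanning-way entry at a time;
	 * a6 masks off the attribute bits of each existing entry, a7 is
	 * the writeback attribute merged in, a8 is the 512 MB stride and
	 * a9 counts down the remaining region size.
	 */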
	movi	a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
	movi	a6, ~_PAGE_ATTRIB_MASK
	movi	a7, CA_WRITEBACK
	movi	a8, 0x20000000
	movi	a9, PLATFORM_DEFAULT_MEM_SIZE
	j	2f
1:
	sub	a9, a9, a8
2:
#if XCHAL_DCACHE_SIZE
	rdtlb1	a3, a5
	and	a3, a3, a6
	or	a3, a3, a7
	wdtlb	a3, a5
#endif
#if XCHAL_ICACHE_SIZE
	ritlb1	a4, a5
	and	a4, a4, a6
	or	a4, a4, a7
	witlb	a4, a5
#endif
	add	a5, a5, a8
	bltu	a8, a9, 1b

#endif
	.endm

#endif /*__ASSEMBLY__*/
#endif /* _XTENSA_INITIALIZE_MMU_H */