tlbex.S

/*
 * TLB Exception Handling for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: April 2011 :
 *  -MMU v1: moved out legacy code into a separate file
 *  -MMU v3: PD{0,1} bits layout changed: they don't overlap anymore,
 *   which helps avoid a shift when preparing PD0 from PTE
 *
 * Vineetg: July 2009
 *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
 *   entry, so that it doesn't knock out its I-TLB entry
 *  -Some more fine tuning:
 *   bmsk instead of add, asl.cc instead of branch, delay slot utilisation etc.
 *
 * Vineetg: July 2009
 *  -Practically rewrote the I/D TLB Miss handlers
 *   Now 40 and 135 instructions apiece, compared to 131 and 449 resp.
 *   Hence leaner by 1.5 K
 *   Used conditional arithmetic to replace excessive branching
 *   Also used short instructions wherever possible
 *
 * Vineetg: Aug 13th 2008
 *  -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
 *   more information in case of a Fatality
 *
 * Vineetg: March 25th Bug #92690
 *  -Added Debug Code to check if sw-ASID == hw-ASID
 *
 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
 */
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
#include <asm/tlb-mmu1.h>
#ifdef CONFIG_ISA_ARCOMPACT
;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it provides only
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
; "global" is used to free up the FIRST core reg to be able to code the rest
; of the exception prologue (IRQs are auto-disabled on Exceptions, so this is
; IRQ-safe). Since the Fast Path TLB Miss handler is coded with 4 regs, the
; remaining 3 need to be saved as well, by extending the "global" to 4 words.
; Hence ".size ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; need to save only a handful of regs, as opposed to the complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
; core reg, as that would not be SMP safe.
; Thus the scratch AUX reg is used (and no longer used to cache task PGD).
; To save the remaining 3 regs per cpu, the global is made "per-cpu".
; The epilogue thus has to locate the "per-cpu" storage for the regs.
; To avoid cache line bouncing, the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing only 12 bytes. Hence
; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
; As simple as that....
;--------------------------------------------------------------------------

; scratch memory to save [r0-r3] used to code TLB refill Handler
ARCFP_DATA ex_saved_reg1
	.align 1 << L1_CACHE_SHIFT
	.type   ex_saved_reg1, @object
#ifdef CONFIG_SMP
	.size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
	.size   ex_saved_reg1, 16
ex_saved_reg1:
	.zero 16
#endif
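
; Illustration (assuming, say, CONFIG_NR_CPUS = 4 and a 64-byte cache line,
; i.e. L1_CACHE_SHIFT = 6): the SMP build reserves 4 x 64 = 256 bytes here,
; one private cache line per cpu, of which only 12 bytes (r1-r3) are actually
; used; r0 itself is parked in ARC_REG_SCRATCH_DATA0.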
.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
	GET_CPU_ID  r0			; get to per cpu scratch mem,
	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
	add r0, @ex_saved_reg1, r0
#else
	st    r0, [@ex_saved_reg1]
	mov_s r0, @ex_saved_reg1
#endif
	st_s  r1, [r0, 4]
	st_s  r2, [r0, 8]
	st_s  r3, [r0, 12]

	; VERIFY if the ASID in MMU-PID Reg is same as
	; one in Linux data structures

	tlb_paranoid_check_asm
.endm

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
	GET_CPU_ID  r0			; get to per cpu scratch mem
	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
	add r0, @ex_saved_reg1, r0
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	lr    r0, [ARC_REG_SCRATCH_DATA0]
#else
	mov_s r0, @ex_saved_reg1
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	ld_s  r0, [r0]
#endif
.endm

#else	/* ARCv2 */

.macro TLBMISS_FREEUP_REGS
	PUSH  r0
	PUSH  r1
	PUSH  r2
	PUSH  r3
.endm

.macro TLBMISS_RESTORE_REGS
	POP   r3
	POP   r2
	POP   r1
	POP   r0
.endm

#endif
;============================================================================
;  Troubleshooting Stuff
;============================================================================

; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
; we use the MMU PID Reg to get current ASID.
; In bizarre scenarios SW and HW ASID can get out-of-sync, which is trouble.
; So we try to detect this in the TLB Miss handler

.macro tlb_paranoid_check_asm

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

	GET_CURR_TASK_ON_CPU  r3
	ld r0, [r3, TASK_ACT_MM]
	ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
	breq r0, 0, 55f		; Error if no ASID allocated

	lr r1, [ARC_REG_PID]
	and r1, r1, 0xFF

	and r2, r0, 0xFF	; MMU PID bits only for comparison
	breq r1, r2, 5f

55:
	; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
	lr  r2, [erstatus]
	bbit0 r2, STATUS_U_BIT, 5f

	; We sure are in troubled waters; to flag the error we
	; need to switch to the kernel mode stack to call the error routine
	GET_TSK_STACK_BASE r3, sp

	; Call printk to shout out loud
	mov r2, 1
	j print_asid_mismatch

5:	; ASIDs match so proceed normally
	nop

#endif

.endm
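
; Illustration (a rough C sketch of the check above, not a literal translation):
;
;	mm_asid  = current->active_mm->context.asid & 0xFF;
;	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xFF;
;	if (mm_asid != mmu_asid && fault came from user mode)
;		print_asid_mismatch(mm_asid, mmu_asid, 1);	/* 1 == fast path */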
;============================================================================
;TLB Miss handling Code
;============================================================================

;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
.macro LOAD_FAULT_PTE

	lr  r2, [efa]

#ifndef CONFIG_SMP
	lr  r1, [ARC_REG_SCRATCH_DATA0]	; current pgd
#else
	GET_CURR_TASK_ON_CPU  r1
	ld  r1, [r1, TASK_ACT_MM]
	ld  r1, [r1, MM_PGD]
#endif

	lsr     r0, r2, PGDIR_SHIFT	; Bits for indexing into PGD
	ld.as   r3, [r1, r0]		; PGD entry corresp to faulting addr
	tst	r3, r3
	bz	do_slow_path_pf		; if no Page Table, do page fault

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	and.f	0, r3, _PAGE_HW_SZ	; Is this Huge PMD (thp)
	add2.nz	r1, r1, r0
	bnz.d	2f			; YES: PGD == PMD has THP PTE: stop pgd walk
	mov.nz	r0, r3

#endif
	and	r1, r3, PAGE_MASK

	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT	-> masks page-off bits from @fault-addr
	; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
	; (3) z = (pgtbl + y * 4)
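
	; Illustration (assuming the default 8K pages, PAGE_SHIFT = 13, and
	; 4-byte PTEs, PTE_SIZE_LOG = 2): steps (1) and (3) are fused by
	; shifting right by only 11, so the AND below yields a ready-made
	; byte offset into the page table instead of an element index.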
#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_SIZE_LOG	3	/* 8 == 2 ^ 3 */
#else
#define PTE_SIZE_LOG	2	/* 4 == 2 ^ 2 */
#endif

	; multiply in step (3) above avoided by shifting lesser in step (1)
	lsr     r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
	and     r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
	ld.aw   r0, [r1, r0]		; r0: PTE (lower word only for PAE40)
					; r1: PTE ptr

2:

.endm
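
; Illustration (a rough C equivalent of LOAD_FAULT_PTE, ignoring the THP
; special case; names mirror the symbols used above):
;
;	unsigned long pgde = current->active_mm->pgd[vaddr >> PGDIR_SHIFT];
;	if (!pgde)					/* no page table */
;		goto do_slow_path_pf;
;	pte_t *ptep = (pte_t *)(pgde & PAGE_MASK)
;			+ ((vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
;	/* result: r0 = *ptep, r1 = ptep, r2 = vaddr */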
;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as a two-word TLB Entry [PD0:PD1] in mmu
; (for PAE40, a two-word PTE becomes a three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE

.macro CONV_PTE_TO_TLB
	and    r3, r0, PTE_BITS_RWX	;          r  w  x
	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
	and.f  0,  r0, _PAGE_GLOBAL
	or.z   r2, r2, r3		; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)

	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
	or  r3, r3, r2

	sr  r3, [ARC_REG_TLBPD1]	; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
#ifdef	CONFIG_ARC_HAS_PAE40
	ld	r3, [r1, 4]		; paddr[39..32]
	sr	r3, [ARC_REG_TLBPD1HI]
#endif

	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb

	lr  r3, [ARC_REG_TLBPD0]	; MMU prepares PD0 with vaddr and asid

	or  r3, r3, r2			; S | vaddr | {sasid|asid}
	sr  r3, [ARC_REG_TLBPD0]	; rewrite PD0
.endm
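
; Illustration: the asl/or.z pair above places the Linux RWX bits into the
; TLB's per-mode permission slots. A GLOBAL (kernel/vmalloc) page gets only
; the kernel slots (rwx << 3 -> Kr Kw Kx); a non-GLOBAL user page keeps the
; user slots as well (Kr Kw Kx | Ur Uw Ux), so kernel accesses on the user's
; behalf (e.g. copy_(to|from)_user) see the same permissions.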
;-----------------------------------------------------------------
; Commit the TLB entry into MMU

.macro COMMIT_ENTRY_TO_MMU
#if (CONFIG_ARC_MMU_VER < 4)

	/* Get free TLB slot: Set = computed from vaddr, way = random */
	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]

	/* Commit the Write */
#if (CONFIG_ARC_MMU_VER >= 2)	/* introduced in v2 */
	sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
#else
	sr TLBWrite, [ARC_REG_TLBCOMMAND]
#endif

#else
	sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
#endif
.endm
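
; Illustration: on MMU v1-v3 the commit is a two-command sequence through
; ARC_REG_TLBCOMMAND (TLBGetIndex picks the victim slot, then
; TLBWrite/TLBWriteNI programs PD0/PD1 into it), whereas MMU v4's single
; TLBInsertEntry command does both. TLBWriteNI is understood to skip the
; uTLB invalidation that a plain TLBWrite performs.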
ARCFP_CODE	;Fast Path Code, candidate for ICCM

;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissI)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions approp for executing code
	cmp_s   r2, VMALLOC_START
	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
	or.hs   r2, r2, _PAGE_GLOBAL

	and     r3, r0, r2	; Mask out NON Flag bits from PTE
	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
	bnz     do_slow_path_pf

	; Let Linux VM know that the page was accessed
	or      r0, r0, _PAGE_ACCESSED	; set Accessed Bit
	st_s    r0, [r1]		; Write back PTE

	CONV_PTE_TO_TLB
	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissI_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

END(EV_TLBMissI)
;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissD)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed
	; If PTE exists, it will setup r0 = PTE, r1 = Ptr to PTE, r2 = EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)

	cmp_s	r2, VMALLOC_START
	mov_s	r2, _PAGE_PRESENT	; common bit for K/U PTE
	or.hs	r2, r2, _PAGE_GLOBAL	; kernel PTE only

	; Linux PTE [RWX] bits are semantically overloaded:
	; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
	; -Otherwise they are user-mode permissions, and those are exactly
	;  the same for kernel mode as well (e.g. copy_(to|from)_user)

	lr      r3, [ecr]
	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
	or.nz   r2, r2, _PAGE_READ		; chk for Read flag in PTE
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
	or.nz   r2, r2, _PAGE_WRITE		; chk for Write flag in PTE
	; Above laddering takes care of XCHG access (both R and W)

	; By now, r2 is setup with all the Flags we need to check in PTE
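
	; Illustration: e.g. a plain load miss on a user page leaves r2 as
	; (_PAGE_PRESENT | _PAGE_READ); a store miss on a vmalloc address
	; would leave it as (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_WRITE).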
	and     r3, r0, r2		; Mask out NON Flag bits from PTE
	brne.d  r3, r2, do_slow_path_pf	; is ((pte & flags_test) == flags_test)

	;----------------------------------------------------------------
	; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
	lr      r3, [ecr]
	or      r0, r0, _PAGE_ACCESSED		; Accessed bit always
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; See if it was a Write Access ?
	or.nz   r0, r0, _PAGE_DIRTY		; if Write, set Dirty bit as well
	st_s    r0, [r1]			; Write back PTE

	CONV_PTE_TO_TLB

#if (CONFIG_ARC_MMU_VER == 1)
	; MMU with 2 way set assoc J-TLB needs some help in the pathetic case
	; of memcpy, where 3 parties contend for 2 ways, causing a livelock.
	; But only for old MMU or one with Metal Fix
	TLB_WRITE_HEURISTICS
#endif

	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissD_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:

	; Restore the 4 scratch regs saved by the fast path miss handler
	TLBMISS_RESTORE_REGS

	; Slow path TLB Miss handled as a regular ARC Exception
	; (stack switching / save the complete reg-file).
	b  call_do_page_fault
END(EV_TLBMissD)