/*
 * OpenRISC head.S
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/spr_defs.h>
#include <asm/asm-offsets.h>
#include <linux/of_fdt.h>

/* Translate the virtual (kernel) address in rs to a physical address in
 * rd by adding hi(-KERNELBASE); used while the MMU is still off.
 * NOTE(review): only the high 16 bits are applied, so this assumes
 * KERNELBASE is 64KB-aligned. */
#define tophys(rd,rs)                                   \
        l.movhi rd,hi(-KERNELBASE)                      ;\
        l.add   rd,rd,rs

/* Zero a general-purpose register (movhi clears all 32 bits). */
#define CLEAR_GPR(gpr)                                  \
        l.movhi gpr,0x0

/* Load the 32-bit address of symbol into gpr: high half, then OR in
 * the low half. */
#define LOAD_SYMBOL_2_GPR(gpr,symbol)                   \
        l.movhi gpr,hi(symbol)                          ;\
        l.ori   gpr,gpr,lo(symbol)

#define UART_BASE_ADD      0x90000000

/* SR value installed while handling an exception: supervisor mode with
 * MMUs and caches enabled; interrupt and tick-timer exceptions masked. */
#define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)

/* SR value used for syscalls: as EXCEPTION_SR but with external
 * interrupts (IEE) and tick timer (TEE) re-enabled. */
#define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
/* ============================================[ tmp store locations ]=== */

/* SPR address of the shadow copy of GPR x (second shadow register
 * file: base + 32 registers offset). */
#define SPR_SHADOW_GPR(x)	((x) + SPR_GPR_BASE + 32)

/*
 * emergency_print temporary stores
 *
 * Save/restore the registers clobbered by the _emergency_print*
 * routines: to shadow GPRs when the CPU has them, otherwise to fixed
 * scratch words at low physical addresses 0x20..0x34.
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EMERGENCY_PRINT_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(14)
#define EMERGENCY_PRINT_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(14)
#define EMERGENCY_PRINT_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(15)
#define EMERGENCY_PRINT_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(15)
#define EMERGENCY_PRINT_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(16)
#define EMERGENCY_PRINT_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(16)
/* NOTE(review): r4-r6 use shadow slots 14-16 while r7-r9 use their own
 * numbers 7-9; slots 2-6 are taken by the EXCEPTION_* stores below. */
#define EMERGENCY_PRINT_STORE_GPR7	l.mtspr r0,r7,SPR_SHADOW_GPR(7)
#define EMERGENCY_PRINT_LOAD_GPR7	l.mfspr r7,r0,SPR_SHADOW_GPR(7)
#define EMERGENCY_PRINT_STORE_GPR8	l.mtspr r0,r8,SPR_SHADOW_GPR(8)
#define EMERGENCY_PRINT_LOAD_GPR8	l.mfspr r8,r0,SPR_SHADOW_GPR(8)
#define EMERGENCY_PRINT_STORE_GPR9	l.mtspr r0,r9,SPR_SHADOW_GPR(9)
#define EMERGENCY_PRINT_LOAD_GPR9	l.mfspr r9,r0,SPR_SHADOW_GPR(9)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)
#define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
#define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)
#define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
#define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)
#define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
#define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)
#define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
#define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)
#define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)
#endif
/*
 * TLB miss handlers temporary stores
 *
 * Scratch save/restore for r2-r6: shadow GPRs when available,
 * otherwise fixed scratch words at low physical addresses 0x64..0x74.
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_STORE_GPR2	l.mtspr r0,r2,SPR_SHADOW_GPR(2)
#define EXCEPTION_LOAD_GPR2	l.mfspr r2,r0,SPR_SHADOW_GPR(2)
#define EXCEPTION_STORE_GPR3	l.mtspr r0,r3,SPR_SHADOW_GPR(3)
#define EXCEPTION_LOAD_GPR3	l.mfspr r3,r0,SPR_SHADOW_GPR(3)
#define EXCEPTION_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(4)
#define EXCEPTION_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(4)
#define EXCEPTION_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(5)
#define EXCEPTION_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(5)
#define EXCEPTION_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(6)
#define EXCEPTION_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(6)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_STORE_GPR2	l.sw    0x64(r0),r2
#define EXCEPTION_LOAD_GPR2	l.lwz   r2,0x64(r0)
#define EXCEPTION_STORE_GPR3	l.sw    0x68(r0),r3
#define EXCEPTION_LOAD_GPR3	l.lwz   r3,0x68(r0)
#define EXCEPTION_STORE_GPR4	l.sw    0x6c(r0),r4
#define EXCEPTION_LOAD_GPR4	l.lwz   r4,0x6c(r0)
#define EXCEPTION_STORE_GPR5	l.sw    0x70(r0),r5
#define EXCEPTION_LOAD_GPR5	l.lwz   r5,0x70(r0)
#define EXCEPTION_STORE_GPR6	l.sw    0x74(r0),r6
#define EXCEPTION_LOAD_GPR6	l.lwz   r6,0x74(r0)
#endif
/*
 * EXCEPTION_HANDLE temporary stores
 *
 * Scratch save/restore for r30, r10 and the stack pointer (r1), used
 * by EXCEPTION_HANDLE()/UNHANDLED_EXCEPTION() below before a proper
 * exception frame has been built.
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_T_STORE_GPR30		l.mtspr r0,r30,SPR_SHADOW_GPR(30)
#define EXCEPTION_T_LOAD_GPR30(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(30)
#define EXCEPTION_T_STORE_GPR10		l.mtspr r0,r10,SPR_SHADOW_GPR(10)
#define EXCEPTION_T_LOAD_GPR10(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(10)
#define EXCEPTION_T_STORE_SP		l.mtspr r0,r1,SPR_SHADOW_GPR(1)
#define EXCEPTION_T_LOAD_SP(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(1)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)
#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)
#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)
#endif
/* =========================================================[ macros ]=== */

/* Load this CPU's pgd pointer into reg via a physical-address read;
 * t1 is scratch.  On SMP, current_pgd is an array indexed by
 * SPR_COREID (each entry 4 bytes, hence the shift-left by 2). */
#ifdef CONFIG_SMP
#define GET_CURRENT_PGD(reg,t1)                         \
        LOAD_SYMBOL_2_GPR(reg,current_pgd)              ;\
        l.mfspr t1,r0,SPR_COREID                        ;\
        l.slli  t1,t1,2                                 ;\
        l.add   reg,reg,t1                              ;\
        tophys  (t1,reg)                                ;\
        l.lwz   reg,0(t1)
#else
#define GET_CURRENT_PGD(reg,t1)                         \
        LOAD_SYMBOL_2_GPR(reg,current_pgd)              ;\
        tophys  (t1,reg)                                ;\
        l.lwz   reg,0(t1)
#endif

/* Load r10 from current_thread_info_set - clobbers r1 and r30.
 * On SMP the array is indexed by SPR_COREID (4 bytes per entry). */
#ifdef CONFIG_SMP
#define GET_CURRENT_THREAD_INFO                         \
        LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)   ;\
        tophys  (r30,r1)                                ;\
        l.mfspr r10,r0,SPR_COREID                       ;\
        l.slli  r10,r10,2                               ;\
        l.add   r30,r30,r10                             ;\
        /* r10: current_thread_info  */                 ;\
        l.lwz   r10,0(r30)
#else
#define GET_CURRENT_THREAD_INFO                         \
        LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)   ;\
        tophys  (r30,r1)                                ;\
        /* r10: current_thread_info  */                 ;\
        l.lwz   r10,0(r30)
#endif
/*
 * DSCR: this is a common hook for handling exceptions. it will save
 *       the needed registers, set up stack and pointer to current
 *       then jump to the handler while enabling MMU
 *
 * PRMS: handler - a function to jump to. it has to save the
 *       remaining registers to kernel stack, call
 *       appropriate arch-independent exception handler
 *       and finally jump to ret_from_except
 *
 * PREQ: unchanged state from the time exception happened
 *
 * POST: SAVED the following registers original value
 *       to the new created exception frame pointed to by r1
 *
 *       r1  - ksp      pointing to the new (exception) frame
 *       r4  - EEAR     exception EA
 *       r10 - current  pointing to current_thread_info struct
 *       r12 - syscall  0, since we didn't come from syscall
 *       r30 - handler  address of the handler we'll jump to
 *
 *       handler has to save remaining registers to the exception
 *       ksp frame *before* tainting them!
 *
 * NOTE: this function is not reentrant per se. reentrancy is guaranteed
 *       by processor disabling all exceptions/interrupts when exception
 *       occurs.
 *
 * OPTM: no need to make it so wasteful to extract ksp when in user mode
 */

#define EXCEPTION_HANDLE(handler)                       \
        EXCEPTION_T_STORE_GPR30                         ;\
        /* was the exception taken from supervisor mode? */ ;\
        l.mfspr r30,r0,SPR_ESR_BASE                     ;\
        l.andi  r30,r30,SPR_SR_SM                       ;\
        l.sfeqi r30,0                                   ;\
        EXCEPTION_T_STORE_GPR10                         ;\
        l.bnf   2f            /* kernel_mode */         ;\
        EXCEPTION_T_STORE_SP  /* delay slot */          ;\
1: /* user_mode:   */                                   ;\
        /* fetch kernel stack pointer from thread_info */ ;\
        GET_CURRENT_THREAD_INFO                         ;\
        tophys  (r30,r10)                               ;\
        l.lwz   r1,(TI_KSP)(r30)                        ;\
        /* fall through */                              ;\
2: /* kernel_mode: */                                   ;\
        /* create new stack frame, save only needed gprs */ ;\
        /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\
        /* r12: temp, syscall indicator */              ;\
        l.addi  r1,r1,-(INT_FRAME_SIZE)                 ;\
        /* r1 is KSP, r30 is __pa(KSP) */               ;\
        tophys  (r30,r1)                                ;\
        l.sw    PT_GPR12(r30),r12                       ;\
        /* r4 use for tmp before EA */                  ;\
        l.mfspr r12,r0,SPR_EPCR_BASE                    ;\
        l.sw    PT_PC(r30),r12                          ;\
        l.mfspr r12,r0,SPR_ESR_BASE                     ;\
        l.sw    PT_SR(r30),r12                          ;\
        /* save r30 */                                  ;\
        EXCEPTION_T_LOAD_GPR30(r12)                     ;\
        l.sw    PT_GPR30(r30),r12                       ;\
        /* save r10 as was prior to exception */        ;\
        EXCEPTION_T_LOAD_GPR10(r12)                     ;\
        l.sw    PT_GPR10(r30),r12                       ;\
        /* save PT_SP as was prior to exception */      ;\
        EXCEPTION_T_LOAD_SP(r12)                        ;\
        l.sw    PT_SP(r30),r12                          ;\
        /* save exception r4, set r4 = EA */            ;\
        l.sw    PT_GPR4(r30),r4                         ;\
        l.mfspr r4,r0,SPR_EEAR_BASE                     ;\
        /* r12 == 1 if we come from syscall */          ;\
        CLEAR_GPR(r12)                                  ;\
        /* ----- turn on MMU ----- */                   ;\
        /* Carry DSX into exception SR */               ;\
        l.mfspr r30,r0,SPR_SR                           ;\
        l.andi  r30,r30,SPR_SR_DSX                      ;\
        l.ori   r30,r30,(EXCEPTION_SR)                  ;\
        l.mtspr r0,r30,SPR_ESR_BASE                     ;\
        /* r30: EA address of handler */                ;\
        LOAD_SYMBOL_2_GPR(r30,handler)                  ;\
        l.mtspr r0,r30,SPR_EPCR_BASE                    ;\
        /* rfe jumps to handler with EXCEPTION_SR (MMU on) */ ;\
        l.rfe
/*
 * this doesn't work
 *
 *
 * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
 * #define UNHANDLED_EXCEPTION(handler)                 \
 *      l.ori   r3,r0,0x1                               ;\
 *      l.mtspr r0,r3,SPR_SR                            ;\
 *      l.movhi r3,hi(0xf0000100)                       ;\
 *      l.ori   r3,r3,lo(0xf0000100)                    ;\
 *      l.jr    r3                                      ;\
 *      l.nop   1
 *
 * #endif
 */

/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
 *       a bit more careful (if we have a PT_SP or current pointer
 *       corruption) and set them up from 'current_set'.
 *       It also prints the offending vector and EPCR via the
 *       _emergency_print helpers before building the frame.
 */
#define UNHANDLED_EXCEPTION(handler)                    \
        EXCEPTION_T_STORE_GPR30                         ;\
        EXCEPTION_T_STORE_GPR10                         ;\
        EXCEPTION_T_STORE_SP                            ;\
        /* temporary store r3, r9 into r1, r10 */       ;\
        l.addi  r1,r3,0x0                               ;\
        l.addi  r10,r9,0x0                              ;\
        /* the string referenced by r3 must be low enough */ ;\
        l.jal   _emergency_print                        ;\
        l.ori   r3,r0,lo(_string_unhandled_exception)   ;\
        /* print the vector offset (NPC bits 12:8) */   ;\
        l.mfspr r3,r0,SPR_NPC                           ;\
        l.jal   _emergency_print_nr                     ;\
        l.andi  r3,r3,0x1f00                            ;\
        /* the string referenced by r3 must be low enough */ ;\
        l.jal   _emergency_print                        ;\
        l.ori   r3,r0,lo(_string_epc_prefix)            ;\
        l.jal   _emergency_print_nr                     ;\
        l.mfspr r3,r0,SPR_EPCR_BASE                     ;\
        l.jal   _emergency_print                        ;\
        l.ori   r3,r0,lo(_string_nl)                    ;\
        /* end of printing */                           ;\
        l.addi  r3,r1,0x0                               ;\
        l.addi  r9,r10,0x0                              ;\
        /* extract current, ksp from current_set */     ;\
        LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)      ;\
        LOAD_SYMBOL_2_GPR(r10,init_thread_union)        ;\
        /* create new stack frame, save only needed gprs */ ;\
        /* r1: KSP, r10: current, r31: __pa(KSP) */     ;\
        /* r12: temp, syscall indicator, r13 temp */    ;\
        l.addi  r1,r1,-(INT_FRAME_SIZE)                 ;\
        /* r1 is KSP, r30 is __pa(KSP) */               ;\
        tophys  (r30,r1)                                ;\
        l.sw    PT_GPR12(r30),r12                       ;\
        l.mfspr r12,r0,SPR_EPCR_BASE                    ;\
        l.sw    PT_PC(r30),r12                          ;\
        l.mfspr r12,r0,SPR_ESR_BASE                     ;\
        l.sw    PT_SR(r30),r12                          ;\
        /* save r30 */                                  ;\
        EXCEPTION_T_LOAD_GPR30(r12)                     ;\
        l.sw    PT_GPR30(r30),r12                       ;\
        /* save r10 as was prior to exception */        ;\
        EXCEPTION_T_LOAD_GPR10(r12)                     ;\
        l.sw    PT_GPR10(r30),r12                       ;\
        /* save PT_SP as was prior to exception */      ;\
        EXCEPTION_T_LOAD_SP(r12)                        ;\
        l.sw    PT_SP(r30),r12                          ;\
        l.sw    PT_GPR13(r30),r13                       ;\
        /* --> */                                       ;\
        /* save exception r4, set r4 = EA */            ;\
        l.sw    PT_GPR4(r30),r4                         ;\
        l.mfspr r4,r0,SPR_EEAR_BASE                     ;\
        /* r12 == 1 if we come from syscall */          ;\
        CLEAR_GPR(r12)                                  ;\
        /* ----- play a MMU trick ----- */              ;\
        l.ori   r30,r0,(EXCEPTION_SR)                   ;\
        l.mtspr r0,r30,SPR_ESR_BASE                     ;\
        /* r30: EA address of handler */                ;\
        LOAD_SYMBOL_2_GPR(r30,handler)                  ;\
        l.mtspr r0,r30,SPR_EPCR_BASE                    ;\
        l.rfe
/* =====================================================[ exceptions] === */

/* The OpenRISC exception vector table: one fixed-offset slot per
 * exception, each 0x100 bytes apart starting at 0x100. */

/* ---[ 0x100: RESET exception ]----------------------------------------- */
	.org 0x100
	/* Jump to .init code at _start which lives in the .head section
	 * and will be discarded after boot.
	 */
	LOAD_SYMBOL_2_GPR(r15, _start)
	tophys	(r13,r15)		/* MMU disabled */
	l.jr	r13
	l.nop

/* ---[ 0x200: BUS exception ]------------------------------------------- */
	.org 0x200
_dispatch_bus_fault:
	EXCEPTION_HANDLE(_bus_fault_handler)

/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
	.org 0x300
_dispatch_do_dpage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x300)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
	EXCEPTION_HANDLE(_data_page_fault_handler)

/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
	.org 0x400
_dispatch_do_ipage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x400)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
	EXCEPTION_HANDLE(_insn_page_fault_handler)

/* ---[ 0x500: Timer exception ]----------------------------------------- */
	.org 0x500
	EXCEPTION_HANDLE(_timer_handler)

/* ---[ 0x600: Alignment exception ]-------------------------------------- */
	.org 0x600
	EXCEPTION_HANDLE(_alignment_handler)

/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
	.org 0x700
	EXCEPTION_HANDLE(_illegal_instruction_handler)

/* ---[ 0x800: External interrupt exception ]---------------------------- */
	.org 0x800
	EXCEPTION_HANDLE(_external_irq_handler)

/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
	.org 0x900
	l.j	boot_dtlb_miss_handler
	l.nop

/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
	.org 0xa00
	l.j	boot_itlb_miss_handler
	l.nop

/* ---[ 0xb00: Range exception ]----------------------------------------- */
	.org 0xb00
	UNHANDLED_EXCEPTION(_vector_0xb00)

/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
	.org 0xc00
	EXCEPTION_HANDLE(_sys_call_handler)

/* ---[ 0xd00: Trap exception ]------------------------------------------ */
	.org 0xd00
	UNHANDLED_EXCEPTION(_vector_0xd00)

/* ---[ 0xe00: Trap exception ]------------------------------------------ */
	.org 0xe00
//	UNHANDLED_EXCEPTION(_vector_0xe00)
	EXCEPTION_HANDLE(_trap_handler)

/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
	.org 0xf00
	UNHANDLED_EXCEPTION(_vector_0xf00)

/* ---[ 0x1000: Reserved exception ]------------------------------------- */
	.org 0x1000
	UNHANDLED_EXCEPTION(_vector_0x1000)

/* ---[ 0x1100: Reserved exception ]------------------------------------- */
	.org 0x1100
	UNHANDLED_EXCEPTION(_vector_0x1100)

/* ---[ 0x1200: Reserved exception ]------------------------------------- */
	.org 0x1200
	UNHANDLED_EXCEPTION(_vector_0x1200)

/* ---[ 0x1300: Reserved exception ]------------------------------------- */
	.org 0x1300
	UNHANDLED_EXCEPTION(_vector_0x1300)

/* ---[ 0x1400: Reserved exception ]------------------------------------- */
	.org 0x1400
	UNHANDLED_EXCEPTION(_vector_0x1400)

/* ---[ 0x1500: Reserved exception ]------------------------------------- */
	.org 0x1500
	UNHANDLED_EXCEPTION(_vector_0x1500)

/* ---[ 0x1600: Reserved exception ]------------------------------------- */
	.org 0x1600
	UNHANDLED_EXCEPTION(_vector_0x1600)

/* ---[ 0x1700: Reserved exception ]------------------------------------- */
	.org 0x1700
	UNHANDLED_EXCEPTION(_vector_0x1700)

/* ---[ 0x1800: Reserved exception ]------------------------------------- */
	.org 0x1800
	UNHANDLED_EXCEPTION(_vector_0x1800)

/* ---[ 0x1900: Reserved exception ]------------------------------------- */
	.org 0x1900
	UNHANDLED_EXCEPTION(_vector_0x1900)

/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
	.org 0x1a00
	UNHANDLED_EXCEPTION(_vector_0x1a00)

/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
	.org 0x1b00
	UNHANDLED_EXCEPTION(_vector_0x1b00)

/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
	.org 0x1c00
	UNHANDLED_EXCEPTION(_vector_0x1c00)

/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
	.org 0x1d00
	UNHANDLED_EXCEPTION(_vector_0x1d00)

/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
	.org 0x1e00
	UNHANDLED_EXCEPTION(_vector_0x1e00)

/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
	.org 0x1f00
	UNHANDLED_EXCEPTION(_vector_0x1f00)

	.org 0x2000
/* ===================================================[ kernel start ]=== */

/*    .text*/

/* This early stuff belongs in HEAD, but some of the functions below definitely
 * don't... */

	__HEAD
	.global _start
_start:
	/* Init r0 to zero as per spec */
	CLEAR_GPR(r0)

	/* save kernel parameters */
	l.or	r25,r0,r3	/* pointer to fdt */

	/*
	 * ensure a deterministic start
	 */

	l.ori	r3,r0,0x1
	l.mtspr	r0,r3,SPR_SR

	/* Zero all GPRs except r0 (hardwired zero) and r25, which holds
	 * the fdt pointer saved above. */
	CLEAR_GPR(r1)
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r10)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

#ifdef CONFIG_SMP
	/* Only core 0 runs the boot path; secondaries park and wait. */
	l.mfspr	r26,r0,SPR_COREID
	l.sfeq	r26,r0
	l.bnf	secondary_wait
	l.nop
#endif
	/*
	 * set up initial ksp and current
	 */
	/* setup kernel stack */
	LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
	tophys	(r31,r10)
	l.sw	TI_KSP(r31), r1

	l.ori	r4,r0,0x0

	/*
	 * .data contains initialized data,
	 * .bss contains uninitialized data - clear it up
	 */
clear_bss:
	LOAD_SYMBOL_2_GPR(r24, __bss_start)
	LOAD_SYMBOL_2_GPR(r26, _end)
	tophys(r28,r24)
	tophys(r30,r26)
	CLEAR_GPR(r24)
	CLEAR_GPR(r26)
1:
	l.sw	(0)(r28),r0
	l.sfltu	r28,r30
	l.bf	1b
	l.addi	r28,r28,4	/* delay slot: advance to next word */

enable_ic:
	l.jal	_ic_enable
	l.nop

enable_dc:
	l.jal	_dc_enable
	l.nop

flush_tlb:
	l.jal	_flush_tlb
	l.nop

/* The MMU needs to be enabled before or32_early_setup is called */

enable_mmu:
	/*
	 * enable dmmu & immu: set SR[DME] (bit 5) and SR[IME] (bit 6)
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	l.mtspr	r0,r30,SPR_SR
	/* pad with nops while the new SR (MMU on) takes effect */
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop

	// reset the simulation counters
	l.nop 5

	/* check fdt header magic word */
	l.lwz	r3,0(r25)	/* load magic from fdt into r3 */
	l.movhi	r4,hi(OF_DT_HEADER)
	l.ori	r4,r4,lo(OF_DT_HEADER)
	l.sfeq	r3,r4
	l.bf	_fdt_found
	l.nop
	/* magic number mismatch, set fdt pointer to null */
	l.or	r25,r0,r0
_fdt_found:
	/* pass fdt pointer to or32_early_setup in r3 */
	l.or	r3,r0,r25
	LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
	l.jalr	r24
	l.nop

clear_regs:
	/*
	 * clear all GPRS to increase determinism
	 * (r1 = kernel stack pointer and r10 = current are preserved)
	 */
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r25)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

jump_start_kernel:
	/*
	 * jump to kernel entry (start_kernel)
	 */
	LOAD_SYMBOL_2_GPR(r30, start_kernel)
	l.jr	r30
	l.nop
_flush_tlb:
	/*
	 *  I N V A L I D A T E   T L B   e n t r i e s
	 *
	 * Write 0 to the match register of every DTLB and ITLB set.
	 * NOTE(review): with the compare before the delay-slot decrement
	 * the loop runs 129 times (sets 0..128), one more than the 128
	 * sets stated below - confirm whether that is intentional.
	 */
	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
	l.addi	r7,r0,128	/* Maximum number of sets */
1:
	l.mtspr	r5,r0,0x0	/* invalidate DTLB set */
	l.mtspr	r6,r0,0x0	/* invalidate ITLB set */

	l.addi	r5,r5,1
	l.addi	r6,r6,1
	l.sfeq	r7,r0
	l.bnf	1b
	l.addi	r7,r7,-1	/* delay slot: decrement set counter */

	l.jr	r9
	l.nop
#ifdef CONFIG_SMP
secondary_wait:
	/* Doze the cpu until we are asked to run */
	/* If we don't have power management skip doze */
	l.mfspr	r25,r0,SPR_UPR
	l.andi	r25,r25,SPR_UPR_PMP
	l.sfeq	r25,r0
	l.bf	secondary_check_release
	l.nop

	/* Setup special secondary exception handler */
	LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
	tophys(r25,r3)
	l.mtspr	r0,r25,SPR_EVBAR

	/* Enable Interrupts */
	l.mfspr	r25,r0,SPR_SR
	l.ori	r25,r25,SPR_SR_IEE
	l.mtspr	r0,r25,SPR_SR

	/* Unmask interrupts */
	l.mfspr	r25,r0,SPR_PICMR
	l.ori	r25,r25,0xffff
	l.mtspr	r0,r25,SPR_PICMR

	/* Doze: set the doze-mode enable bit in PMR */
	l.mfspr	r25,r0,SPR_PMR
	LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
	l.or	r25,r25,r3
	l.mtspr	r0,r25,SPR_PMR

	/* Wakeup - Restore exception handler */
	l.mtspr	r0,r0,SPR_EVBAR

secondary_check_release:
	/*
	 * Check if we actually got the release signal, if not go back to
	 * sleep.  secondary_release holds the COREID of the cpu being
	 * released.
	 */
	l.mfspr	r25,r0,SPR_COREID
	LOAD_SYMBOL_2_GPR(r3, secondary_release)
	tophys(r4, r3)
	l.lwz	r3,0(r4)
	l.sfeq	r25,r3
	l.bnf	secondary_wait
	l.nop
	/* fall through to secondary_init */

secondary_init:
	/*
	 * set up initial ksp and current
	 */
	LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
	tophys	(r30,r10)
	l.lwz	r10,0(r30)
	l.addi	r1,r10,THREAD_SIZE
	tophys	(r30,r10)
	l.sw	TI_KSP(r30),r1

	l.jal	_ic_enable
	l.nop

	l.jal	_dc_enable
	l.nop

	l.jal	_flush_tlb
	l.nop

	/*
	 * enable dmmu & immu
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28

	/*
	 * This is a bit tricky, we need to switch over from physical addresses
	 * to virtual addresses on the fly.
	 * To do that, we first set up ESR with the IME and DME bits set.
	 * Then EPCR is set to secondary_start and then a l.rfe is issued to
	 * "jump" to that.
	 */
	l.mtspr	r0,r30,SPR_ESR_BASE
	LOAD_SYMBOL_2_GPR(r30, secondary_start)
	l.mtspr	r0,r30,SPR_EPCR_BASE
	l.rfe

secondary_start:
	LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
	l.jr	r30
	l.nop

#endif
/* ========================================[ cache ]=== */

	/* alignment here so we don't change memory offsets with
	 * memory controller defined
	 */
	.align 0x2000

/* Enable the instruction cache: skip entirely if UPR says no IC is
 * present; otherwise disable it, invalidate every block, re-enable. */
_ic_enable:
	/* Check if IC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_ICP
	l.sfeq	r26,r0
	l.bf	9f
	l.nop

	/* Disable IC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_ICE	/* r5 = ~SPR_SR_ICE */
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r24,r0,SPR_ICCFGR
	l.andi	r26,r24,SPR_ICCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log2(# of cache sets)
	*/
	l.andi	r26,r24,SPR_ICCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate IC: step through the cache in block-size increments,
	 * r5 = block size << log2(#sets) = total bytes to cover */
	l.addi	r6,r0,0
	l.sll	r5,r14,r28
//        l.mul	r5,r14,r16
//	l.trap  1
//        l.addi	r5,r0,IC_SIZE
1:
	l.mtspr	r0,r6,SPR_ICBIR
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14	/* delay slot: next block address */
//       l.addi   r6,r6,IC_LINE

	/* Enable IC */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_ICE
	l.mtspr	r0,r6,SPR_SR
	/* pad with nops while the IC-enable takes effect */
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop

9:
	l.jr	r9
	l.nop
	/*
	 * _dc_enable: probe, invalidate and enable the data cache.
	 * Mirrors _ic_enable but uses DCCFGR/DCBIR/DCE (and needs no nop
	 * padding, since data accesses are not affected mid-pipeline the
	 * way instruction fetches are).
	 * Clobbers: r5, r6, r14, r16, r24, r26, r28, r30, flags
	 * Return: l.jr r9
	 */
_dc_enable:
	/* Check if DC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_DCP
	l.sfeq	r26,r0
	l.bf	9f
	l.nop

	/* Disable DC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_DCE		// r5 = ~SPR_SR_DCE
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contain block size
	*/
	l.mfspr	r24,r0,SPR_DCCFGR
	l.andi	r26,r24,SPR_DCCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_DCCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate DC */
	l.addi	r6,r0,0				// r6 = current invalidate address
	l.sll	r5,r14,r28			// r5 = block size << log2(#sets) = cache size
1:
	l.mtspr	r0,r6,SPR_DCBIR			// invalidate the line holding address r6
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14			// delay slot: advance by one block

	/* Enable DC */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_DCE
	l.mtspr	r0,r6,SPR_SR
9:
	l.jr	r9
	l.nop
  803. /* ===============================================[ page table masks ]=== */
  804. #define DTLB_UP_CONVERT_MASK 0x3fa
  805. #define ITLB_UP_CONVERT_MASK 0x3a
  806. /* for SMP we'd have (this is a bit subtle, CC must be always set
  807. * for SMP, but since we have _PAGE_PRESENT bit always defined
  808. * we can just modify the mask)
  809. */
  810. #define DTLB_SMP_CONVERT_MASK 0x3fb
  811. #define ITLB_SMP_CONVERT_MASK 0x3b
  812. /* ---[ boot dtlb miss handler ]----------------------------------------- */
  813. boot_dtlb_miss_handler:
  814. /* mask for DTLB_MR register: - (0) sets V (valid) bit,
  815. * - (31-12) sets bits belonging to VPN (31-12)
  816. */
  817. #define DTLB_MR_MASK 0xfffff001
  818. /* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
  819. * - (4) sets A (access) bit,
  820. * - (5) sets D (dirty) bit,
  821. * - (8) sets SRE (superuser read) bit
  822. * - (9) sets SWE (superuser write) bit
  823. * - (31-12) sets bits belonging to VPN (31-12)
  824. */
  825. #define DTLB_TR_MASK 0xfffff332
  826. /* These are for masking out the VPN/PPN value from the MR/TR registers...
  827. * it's not the same as the PFN */
  828. #define VPN_MASK 0xfffff000
  829. #define PPN_MASK 0xfffff000
  830. EXCEPTION_STORE_GPR6
  831. #if 0
  832. l.mfspr r6,r0,SPR_ESR_BASE //
  833. l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
  834. l.sfeqi r6,0 // r6 == 0x1 --> SM
  835. l.bf exit_with_no_dtranslation //
  836. l.nop
  837. #endif
  838. /* this could be optimized by moving storing of
  839. * non r6 registers here, and jumping r6 restore
  840. * if not in supervisor mode
  841. */
  842. EXCEPTION_STORE_GPR2
  843. EXCEPTION_STORE_GPR3
  844. EXCEPTION_STORE_GPR4
  845. EXCEPTION_STORE_GPR5
  846. l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
  847. immediate_translation:
  848. CLEAR_GPR(r6)
  849. l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
  850. l.mfspr r6, r0, SPR_DMMUCFGR
  851. l.andi r6, r6, SPR_DMMUCFGR_NTS
  852. l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
  853. l.ori r5, r0, 0x1
  854. l.sll r5, r5, r6 // r5 = number DMMU sets
  855. l.addi r6, r5, -1 // r6 = nsets mask
  856. l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
  857. l.or r6,r6,r4 // r6 <- r4
  858. l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
  859. l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000
  860. l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
  861. l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry
  862. l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR
  863. /* set up DTLB with no translation for EA <= 0xbfffffff */
  864. LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
  865. l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
  866. l.bf 1f // goto out
  867. l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1)
  868. tophys(r3,r4) // r3 <- PA
  869. 1:
  870. l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
  871. l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000
  872. l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
  873. l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry
  874. l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR
  875. EXCEPTION_LOAD_GPR6
  876. EXCEPTION_LOAD_GPR5
  877. EXCEPTION_LOAD_GPR4
  878. EXCEPTION_LOAD_GPR3
  879. EXCEPTION_LOAD_GPR2
  880. l.rfe // SR <- ESR, PC <- EPC
  881. exit_with_no_dtranslation:
  882. /* EA out of memory or not in supervisor mode */
  883. EXCEPTION_LOAD_GPR6
  884. EXCEPTION_LOAD_GPR4
  885. l.j _dispatch_bus_fault
/* ---[ boot itlb miss handler ]----------------------------------------- */

	/*
	 * Early-boot ITLB miss handler: same structure as the boot DTLB
	 * handler, but fills the ITLB MR/TR pair and only identity-maps
	 * EA <= 0x0fffffff (the early head.S EA == PA window).
	 * Clobbers r2-r6 (saved/restored via the EXCEPTION_*_GPR macros).
	 */
boot_itlb_miss_handler:

/* mask for ITLB_MR register: - sets V (valid) bit,
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_MR_MASK	0xfffff001

/* mask for ITLB_TR register: - sets A (access) bit,
 *                            - sets SXE (superuser execute) bit
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_TR_MASK	0xfffff050

/*
#define VPN_MASK	0xffffe000
#define PPN_MASK	0xffffe000
*/

	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6

#if 0
	l.mfspr	r6,r0,SPR_ESR_BASE	   //
	l.andi	r6,r6,SPR_SR_SM            // are we in kernel mode ?
	l.sfeqi	r6,0                       // r6 == 0x1 --> SM
	l.bf	exit_with_no_itranslation
	l.nop
#endif

	l.mfspr	r4,r0,SPR_EEAR_BASE	   // get the offending EA

earlyearly:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd		   // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)

	/* Derive the number of ITLB sets from IMMUCFGR and compute the
	 * set index for this EA in r2. */
	l.mfspr	r6, r0, SPR_IMMUCFGR
	l.andi	r6, r6, SPR_IMMUCFGR_NTS
	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6		   // r5 = number IMMU sets from IMMUCFGR
	l.addi	r6, r5, -1		   // r6 = nsets mask
	l.and	r2, r3, r6		   // r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4		   // r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	   // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(ITLB_MR_MASK)	   // r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_MR_MASK)	   // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
	l.and	r5,r5,r6		   // r5 <- VPN :VPN .x001 - we have ITLBMR entry
	l.mtspr	r2,r5,SPR_ITLBMR_BASE(0)   // set ITLBMR

	/*
	 * set up ITLB with no translation for EA <= 0x0fffffff
	 *
	 * we need this for head.S mapping (EA = PA). if we move all functions
	 * which run with mmu enabled into entry.S, we might be able to eliminate this.
	 *
	 */
	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
	l.sfgeu	r6,r4			   // flag if r6 >= r4 (if 0x0fffffff >= EA)
	l.bf	1f			   // goto out
	l.and	r3,r4,r4		   // delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)			   // r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)	   // r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(ITLB_TR_MASK)	   // r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_TR_MASK)	   // r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
	l.and	r5,r5,r3		   // r5 <- PPN :PPN .x050 - we have ITLBTR entry
	l.mtspr	r2,r5,SPR_ITLBTR_BASE(0)   // set ITLBTR

	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2

	l.rfe				   // SR <- ESR, PC <- EPC

exit_with_no_itranslation:
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR6
	l.j	_dispatch_bus_fault
	l.nop
/* ====================================================================== */

/*
 * Stuff below here shouldn't go into .head section... maybe this stuff
 * can be moved to entry.S ???
 */

/* ==============================================[ DTLB miss handler ]=== */

/*
 * Comments:
 *   Exception handlers are entered with MMU off so the following handler
 *   needs to use physical addressing
 *
 */

	.text

	/*
	 * Runtime DTLB miss handler: walks the two-level page table for the
	 * faulting EA and loads the matching DTLB MR/TR entry, or falls
	 * through to the page-fault path when the pmd/pte is absent.
	 * Clobbers r2-r4 (saved/restored via the EXCEPTION_*_GPR macros).
	 */
ENTRY(dtlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *   goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	d_pmd_none
	l.addi	r3,r0,0xffffe000	// delay slot: r3 = PAGE_MASK

d_pmd_good:
	/*
	 * pte = *pte_offset(pmd, daddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	d_pte_not_present
	l.addi	r4,r0,0xffffe3fa	// delay slot: PAGE_MASK | DTLB_UP_CONVERT_MASK
	/*
	 * fill DTLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	// Determine number of DMMU sets
	l.mfspr	r2, r0, SPR_DMMUCFGR
	l.andi	r2, r2, SPR_DMMUCFGR_NTS
	l.srli	r2, r2, SPR_DMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number DMMU sets DMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc offset:  & (NUM_TLB_ENTRIES-1)
	//NUM_TLB_ENTRIES
	l.mtspr	r2,r4,SPR_DTLBTR_BASE(0)
	/*
	 * fill DTLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: DTLB_MR entry
	l.mtspr	r2,r4,SPR_DTLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe

d_pmd_none:
d_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
/* ==============================================[ ITLB miss handler ]=== */

	/*
	 * Runtime ITLB miss handler: same page-table walk as the DTLB miss
	 * handler, but fills the ITLB and applies an execute-permission
	 * workaround (see the __PHX__ fixme below).
	 * Clobbers r2-r4 (saved/restored via the EXCEPTION_*_GPR macros).
	 */
ENTRY(itlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 *
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *   goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	i_pmd_none
	l.addi	r3,r0,0xffffe000	// delay slot: r3 = PAGE_MASK

i_pmd_good:
	/*
	 * pte = *pte_offset(pmd, iaddr);
	 *
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 *
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	i_pte_not_present
	l.addi	r4,r0,0xffffe03a	// delay slot: PAGE_MASK | ITLB_UP_CONVERT_MASK
	/*
	 * fill ITLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	l.andi	r3,r3,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE |  _PAGE_URE | _PAGE_UWE
	l.sfeq	r3,r0
	/* NOTE(review): if this branch is taken, only the l.mfspr in its
	 * delay slot runs and the set-index computation below is skipped,
	 * leaving r2 holding raw IMMUCFGR at itlb_tr_fill — looks
	 * suspicious; confirm whether this path can trigger in practice. */
	l.bf	itlb_tr_fill //_workaround
	// Determine number of IMMU sets
	l.mfspr	r2, r0, SPR_IMMUCFGR
	l.andi	r2, r2, SPR_IMMUCFGR_NTS
	l.srli	r2, r2, SPR_IMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number IMMU sets IMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc offset:  & (NUM_TLB_ENTRIES-1)

	/*
	 * __PHX__ :: fixme
	 * we should not just blindly set executable flags,
	 * but it does help with ping. the clean way would be to find out
	 * (and fix it) why stack doesn't have execution permissions
	 */

itlb_tr_fill_workaround:
	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
itlb_tr_fill:
	l.mtspr	r2,r4,SPR_ITLBTR_BASE(0)
	/*
	 * fill ITLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: ITLB_MR entry
	l.mtspr	r2,r4,SPR_ITLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe

i_pmd_none:
i_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
/* ==============================================[ boot tlb handlers ]=== */

/* =================================================[ debugging aids ]=== */

	/* Scratch buffer the workaround below writes a short, rebuilt
	 * instruction sequence into; executed via l.rfe with EPCR pointing
	 * here. */
	.align 64
_immu_trampoline:
	.space 64
_immu_trampoline_top:

/* Byte offsets of the six instruction slots inside _immu_trampoline. */
#define TRAMP_SLOT_0		(0x0)
#define TRAMP_SLOT_1		(0x4)
#define TRAMP_SLOT_2		(0x8)
#define TRAMP_SLOT_3		(0xc)
#define TRAMP_SLOT_4		(0x10)
#define TRAMP_SLOT_5		(0x14)
#define TRAMP_FRAME_SIZE	(0x18)

	/*
	 * _immu_trampoline_workaround: copies the two instructions around
	 * the faulting address into the trampoline buffer, rewriting any
	 * jump/branch so its target (and, for l.jal/l.jalr, the saved r9)
	 * stays correct when executed from the trampoline's address.
	 * In:  r2 = exception effective address (EEA)
	 * Out: EPCR points at the trampoline; returns via l.jr r9
	 * Clobbers: r3-r6, r14, r21, r23, flags; writes code to memory and
	 *           invalidates the two IC lines covering the trampoline.
	 */
ENTRY(_immu_trampoline_workaround)
	// r2 EEA
	// r6 is physical EEA
	tophys(r6,r2)

	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	tophys	(r3,r5)			// r3 is trampoline (physical)

	LOAD_SYMBOL_2_GPR(r4,0x15000000)	// 0x15000000 == l.nop encoding
	l.sw	TRAMP_SLOT_0(r3),r4	// pre-fill slots with l.nop
	l.sw	TRAMP_SLOT_1(r3),r4
	l.sw	TRAMP_SLOT_4(r3),r4
	l.sw	TRAMP_SLOT_5(r3),r4

	// EPC = EEA - 0x4
	l.lwz	r4,0x0(r6)		// load op @ EEA + 0x0 (fc address)
	l.sw	TRAMP_SLOT_3(r3),r4	// store it to _immu_trampoline_data
	l.lwz	r4,-0x4(r6)		// load op @ EEA - 0x4 (f8 address)
	l.sw	TRAMP_SLOT_2(r3),r4	// store it to _immu_trampoline_data

	/* Dispatch on the opcode (top 6 bits) of the instruction before
	 * the EEA; each l.sfeqi result is consumed by the following l.bf,
	 * and the next l.sfeqi sits in that branch's delay slot. */
	l.srli	r5,r4,26		// check opcode for write access
	l.sfeqi	r5,0			// l.j
	l.bf	0f
	l.sfeqi	r5,0x11			// l.jr
	l.bf	1f
	l.sfeqi	r5,1			// l.jal
	l.bf	2f
	l.sfeqi	r5,0x12			// l.jalr
	l.bf	3f
	l.sfeqi	r5,3			// l.bnf
	l.bf	4f
	l.sfeqi	r5,4			// l.bf
	l.bf	5f
99:
	l.nop
	l.j	99b			// should never happen
	l.nop	1

	// r2 is EEA
	// r3 is trampoline address (physical)
	// r4 is instruction
	// r6 is physical(EEA)
	//
	// r5

2:	// l.jal
	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */
	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	/* fallthrough, need to set up new jump offset */

0:	// l.j
	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.sub	r5,r4,r5		// old_jump - new_jump
	l.add	r5,r6,r5		// orig_off + (old_jump - new_jump)
	l.srli	r5,r5,6			// new offset shifted right 2

	// r5 is new jump offset
	// l.j has opcode 0x0...

	l.sw	TRAMP_SLOT_2(r3),r5	// write it back

	l.j	trampoline_out
	l.nop

/* ----------------------------- */

3:	// l.jalr
	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */
	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	l.lhz	r5,(TRAMP_SLOT_2+0x0)(r3)	// load hi part of jump instruction
	l.andi	r5,r5,0x3ff			// clear out opcode part
	l.ori	r5,r5,0x4400			// opcode changed from l.jalr -> l.jr
	l.sh	(TRAMP_SLOT_2+0x0)(r3),r5	// write it back

	/* fallthrough */

1:	// l.jr
	l.j	trampoline_out
	l.nop

/* ----------------------------- */

4:	// l.bnf
5:	// l.bf
	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.add	r6,r6,r4		// (orig_off + old_jump)
	l.sub	r6,r6,r5		// (orig_off + old_jump) - new_jump
	l.srli	r6,r6,6			// new offset shifted right 2

	// r6 is new jump offset
	l.lwz	r4,(TRAMP_SLOT_2+0x0)(r3)	// load jump instruction
	l.srli	r4,r4,16
	l.andi	r4,r4,0xfc00		// get opcode part
	l.slli	r4,r4,16
	l.or	r6,r4,r6		// l.b(n)f new offset
	l.sw	TRAMP_SLOT_2(r3),r6	// write it back

	/* we need to add l.j to EEA + 0x8 */
	tophys	(r4,r2)			// may not be needed (due to shifts down)
	l.addi	r4,r4,(0x8 - 0x8)	// jump target = r2 + 0x8 (compensate for 0x8)
					// jump position = r5 + 0x8 (0x8 compensated)
	l.sub	r4,r4,r5		// jump offset = target - new_position + 0x8

	l.slli	r4,r4,4			// the amount of info in imediate of jump
	l.srli	r4,r4,6			// jump instruction with offset
	l.sw	TRAMP_SLOT_4(r3),r4	// write it to 4th slot

	/* fallthrough */

trampoline_out:
	// set up new EPC to point to our trampoline code
	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	l.mtspr	r0,r5,SPR_EPCR_BASE

	// immu_trampoline is (4x) CACHE_LINE aligned
	// and only 6 instructions long,
	// so we need to invalidate only 2 lines

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contain block size
	*/
	l.mfspr	r21,r0,SPR_ICCFGR
	l.andi	r21,r21,SPR_ICCFGR_CBS
	l.srli	r21,r21,7
	l.ori	r23,r0,16
	l.sll	r14,r23,r21

	/* Invalidate the IC lines covering the code we just wrote. */
	l.mtspr	r0,r5,SPR_ICBIR
	l.add	r5,r5,r14
	l.mtspr	r0,r5,SPR_ICBIR

	l.jr	r9
	l.nop
/*
 * DSCR: prints a string referenced by r3.
 *
 * PRMS: r3	- address of the first character of null
 *		  terminated string to be printed
 *
 * PREQ: UART at UART_BASE_ADD has to be initialized
 *
 * POST: caller should be aware that r3, r9 are changed
 */
ENTRY(_emergency_print)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
2:
	l.lbz	r7,0(r3)		// r7 = next character
	l.sfeq	r7,r0			// NUL terminator?
	l.bf	9f
	l.nop

// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	/* Busy-wait for transmitter ready: offset 5 is the 16550 LSR,
	 * 0x20 = THR empty (THRE). */
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// write the byte to the THR

	/* Wait for it to drain: 0x60 = THRE | transmitter empty (TEMT). */
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r3,r3,0x1		// delay slot: advance string pointer

9:
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	l.nop
	/*
	 * _emergency_print_nr: prints the value in r3 as hex on the early
	 * UART, skipping leading zero nibbles (but always printing at least
	 * the last nibble). r8 holds the current shift amount (bits), worked
	 * down from 32 in steps of 4; a negative r8 terminates the loop.
	 * PREQ: UART at UART_BASE_ADD initialized; clobbers r9 per caller.
	 */
ENTRY(_emergency_print_nr)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
	EMERGENCY_PRINT_STORE_GPR8

	l.addi	r8,r0,32		// shift register

1:	/* remove leading zeros */
	l.addi	r8,r8,-0x4
	l.srl	r7,r3,r8
	l.andi	r7,r7,0xf		// r7 = current nibble

	/* don't skip the last zero if number == 0x0 */
	l.sfeqi	r8,0x4
	l.bf	2f
	l.nop

	l.sfeq	r7,r0
	l.bf	1b
	l.nop

2:
	l.srl	r7,r3,r8
	l.andi	r7,r7,0xf		// r7 = nibble to print
	l.sflts	r8,r0			// all nibbles done (shift went negative)?
	l.bf	9f

	l.sfgtui r7,0x9			// delay slot: nibble > 9 -> letter digit
	l.bnf	8f
	l.nop
	l.addi	r7,r7,0x27		// 0x27 + 0x30 maps 10..15 to 'a'..'f'

8:
	l.addi	r7,r7,0x30		// to ASCII ('0' base)

// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	/* Busy-wait for THR empty (16550 LSR at offset 5, bit 0x20). */
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// emit the digit

	/* Wait for THRE | TEMT (0x60) before the next digit. */
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r8,r8,-0x4		// delay slot: next nibble

9:
	EMERGENCY_PRINT_LOAD_GPR8
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	l.nop
/*
 * This should be used for debugging only.
 * It messes up the Linux early serial output
 * somehow, so use it sparingly and essentially
 * only if you need to debug something that goes wrong
 * before Linux gets the early serial going.
 *
 * Furthermore, you'll have to make sure you set the
 * UART_DIVISOR correctly according to the system
 * clock rate.
 *
 *
 */
#define SYS_CLK            20000000
//#define SYS_CLK            1843200
#define OR32_CONSOLE_BAUD  115200
#define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)

	/*
	 * _early_uart_init: program a 16550-style UART at UART_BASE_ADD for
	 * 8N1 at OR32_CONSOLE_BAUD. Clobbers r3-r5; returns via l.jr r9.
	 * Register offsets below follow the standard 16550 layout — TODO
	 * confirm against the board's UART documentation.
	 */
ENTRY(_early_uart_init)
	l.movhi	r3,hi(UART_BASE_ADD)

	l.addi	r4,r0,0x7		// FCR = 0x7: enable + clear RX/TX FIFOs
	l.sb	0x2(r3),r4

	l.addi	r4,r0,0x0		// IER = 0: mask all UART interrupts
	l.sb	0x1(r3),r4

	l.addi	r4,r0,0x3		// LCR = 0x3: 8 data bits, no parity, 1 stop
	l.sb	0x3(r3),r4

	l.lbz	r5,3(r3)		// save LCR
	l.ori	r4,r5,0x80		// set DLAB to expose the divisor latch
	l.sb	0x3(r3),r4
	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
	l.sb	UART_DLM(r3),r4		// divisor high byte
	l.addi	r4,r0,((UART_DIVISOR) & 0x000000ff)
	l.sb	UART_DLL(r3),r4		// divisor low byte
	l.sb	0x3(r3),r5		// restore LCR (clears DLAB)

	l.jr	r9
	l.nop
	/*
	 * Minimal exception vector block for secondary CPUs while they wait
	 * to be brought up. The handler code sits at offset 0x800 within the
	 * page-aligned block — presumably the external-interrupt vector
	 * offset; confirm against the OR1K exception vector layout.
	 */
	.align 0x1000
	.global _secondary_evbar
_secondary_evbar:

	.space 0x800
	/* Just disable interrupts and Return */
	l.ori	r3,r0,SPR_SR_SM		// supervisor mode, IEE/TEE clear
	l.mtspr	r0,r3,SPR_ESR_BASE
	l.rfe
	/* Message fragments used by the emergency-print debugging aids. */
	.section .rodata
_string_unhandled_exception:
	.string "\n\rRunarunaround: Unhandled exception 0x\0"

_string_epc_prefix:
	.string ": EPC=0x\0"

_string_nl:
	.string "\n\r\0"
/* ========================================[ page aligned structures ]=== */

/*
 * .data section should be page aligned
 *	(look into arch/openrisc/kernel/vmlinux.lds.S)
 */
	.section .data,"aw"
	.align	8192
	.global  empty_zero_page
empty_zero_page:
	.space  8192			// one zero-filled page (8 KiB pages here)

	.global  swapper_pg_dir
swapper_pg_dir:
	.space  8192			// initial kernel page directory

	.global	_unhandled_stack
_unhandled_stack:
	.space  8192			// emergency stack for unhandled exceptions
_unhandled_stack_top:

/* ============================================================[ EOF ]=== */
  1467. /* ============================================================[ EOF ]=== */