/* entry-nommu.S — low-level exception, interrupt and syscall entry for MicroBlaze no-MMU kernels */
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
  10. #include <linux/linkage.h>
  11. #include <asm/thread_info.h>
  12. #include <linux/errno.h>
  13. #include <asm/entry.h>
  14. #include <asm/asm-offsets.h>
  15. #include <asm/registers.h>
  16. #include <asm/unistd.h>
  17. #include <asm/percpu.h>
  18. #include <asm/signal.h>
  19. #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
  20. .macro disable_irq
  21. msrclr r0, MSR_IE
  22. .endm
  23. .macro enable_irq
  24. msrset r0, MSR_IE
  25. .endm
  26. .macro clear_bip
  27. msrclr r0, MSR_BIP
  28. .endm
  29. #else
  30. .macro disable_irq
  31. mfs r11, rmsr
  32. andi r11, r11, ~MSR_IE
  33. mts rmsr, r11
  34. .endm
  35. .macro enable_irq
  36. mfs r11, rmsr
  37. ori r11, r11, MSR_IE
  38. mts rmsr, r11
  39. .endm
  40. .macro clear_bip
  41. mfs r11, rmsr
  42. andi r11, r11, ~MSR_BIP
  43. mts rmsr, r11
  44. .endm
  45. #endif
  46. ENTRY(_interrupt)
  47. swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
  48. swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
  49. lwi r11, r0, PER_CPU(KM) /* load mode indicator */
  50. beqid r11, 1f
  51. nop
  52. brid 2f /* jump over */
  53. addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */
  54. 1: /* switch to kernel stack */
  55. lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
  56. lwi r1, r1, TS_THREAD_INFO /* get the thread info */
  57. /* calculate kernel stack pointer */
  58. addik r1, r1, THREAD_SIZE - PT_SIZE
  59. 2:
  60. swi r11, r1, PT_MODE /* store the mode */
  61. lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
  62. swi r2, r1, PT_R2
  63. swi r3, r1, PT_R3
  64. swi r4, r1, PT_R4
  65. swi r5, r1, PT_R5
  66. swi r6, r1, PT_R6
  67. swi r7, r1, PT_R7
  68. swi r8, r1, PT_R8
  69. swi r9, r1, PT_R9
  70. swi r10, r1, PT_R10
  71. swi r11, r1, PT_R11
  72. swi r12, r1, PT_R12
  73. swi r13, r1, PT_R13
  74. swi r14, r1, PT_R14
  75. swi r14, r1, PT_PC
  76. swi r15, r1, PT_R15
  77. swi r16, r1, PT_R16
  78. swi r17, r1, PT_R17
  79. swi r18, r1, PT_R18
  80. swi r19, r1, PT_R19
  81. swi r20, r1, PT_R20
  82. swi r21, r1, PT_R21
  83. swi r22, r1, PT_R22
  84. swi r23, r1, PT_R23
  85. swi r24, r1, PT_R24
  86. swi r25, r1, PT_R25
  87. swi r26, r1, PT_R26
  88. swi r27, r1, PT_R27
  89. swi r28, r1, PT_R28
  90. swi r29, r1, PT_R29
  91. swi r30, r1, PT_R30
  92. swi r31, r1, PT_R31
  93. /* special purpose registers */
  94. mfs r11, rmsr
  95. swi r11, r1, PT_MSR
  96. mfs r11, rear
  97. swi r11, r1, PT_EAR
  98. mfs r11, resr
  99. swi r11, r1, PT_ESR
  100. mfs r11, rfsr
  101. swi r11, r1, PT_FSR
  102. /* reload original stack pointer and save it */
  103. lwi r11, r0, PER_CPU(ENTRY_SP)
  104. swi r11, r1, PT_R1
  105. /* update mode indicator we are in kernel mode */
  106. addik r11, r0, 1
  107. swi r11, r0, PER_CPU(KM)
  108. /* restore r31 */
  109. lwi r31, r0, PER_CPU(CURRENT_SAVE)
  110. /* prepare the link register, the argument and jump */
  111. addik r15, r0, ret_from_intr - 8
  112. addk r6, r0, r15
  113. braid do_IRQ
  114. add r5, r0, r1
  115. ret_from_intr:
  116. lwi r11, r1, PT_MODE
  117. bneid r11, no_intr_resched
  118. 3:
  119. lwi r6, r31, TS_THREAD_INFO /* get thread info */
  120. lwi r19, r6, TI_FLAGS /* get flags in thread info */
  121. /* do an extra work if any bits are set */
  122. andi r11, r19, _TIF_NEED_RESCHED
  123. beqi r11, 1f
  124. bralid r15, schedule
  125. nop
  126. bri 3b
  127. 1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
  128. beqid r11, no_intr_resched
  129. addk r5, r1, r0
  130. bralid r15, do_notify_resume
  131. addk r6, r0, r0
  132. bri 3b
  133. no_intr_resched:
  134. /* Disable interrupts, we are now committed to the state restore */
  135. disable_irq
  136. /* save mode indicator */
  137. lwi r11, r1, PT_MODE
  138. swi r11, r0, PER_CPU(KM)
  139. /* save r31 */
  140. swi r31, r0, PER_CPU(CURRENT_SAVE)
  141. restore_context:
  142. /* special purpose registers */
  143. lwi r11, r1, PT_FSR
  144. mts rfsr, r11
  145. lwi r11, r1, PT_ESR
  146. mts resr, r11
  147. lwi r11, r1, PT_EAR
  148. mts rear, r11
  149. lwi r11, r1, PT_MSR
  150. mts rmsr, r11
  151. lwi r31, r1, PT_R31
  152. lwi r30, r1, PT_R30
  153. lwi r29, r1, PT_R29
  154. lwi r28, r1, PT_R28
  155. lwi r27, r1, PT_R27
  156. lwi r26, r1, PT_R26
  157. lwi r25, r1, PT_R25
  158. lwi r24, r1, PT_R24
  159. lwi r23, r1, PT_R23
  160. lwi r22, r1, PT_R22
  161. lwi r21, r1, PT_R21
  162. lwi r20, r1, PT_R20
  163. lwi r19, r1, PT_R19
  164. lwi r18, r1, PT_R18
  165. lwi r17, r1, PT_R17
  166. lwi r16, r1, PT_R16
  167. lwi r15, r1, PT_R15
  168. lwi r14, r1, PT_PC
  169. lwi r13, r1, PT_R13
  170. lwi r12, r1, PT_R12
  171. lwi r11, r1, PT_R11
  172. lwi r10, r1, PT_R10
  173. lwi r9, r1, PT_R9
  174. lwi r8, r1, PT_R8
  175. lwi r7, r1, PT_R7
  176. lwi r6, r1, PT_R6
  177. lwi r5, r1, PT_R5
  178. lwi r4, r1, PT_R4
  179. lwi r3, r1, PT_R3
  180. lwi r2, r1, PT_R2
  181. lwi r1, r1, PT_R1
  182. rtid r14, 0
  183. nop
  184. ENTRY(_reset)
  185. brai 0;
  186. ENTRY(_user_exception)
  187. swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
  188. swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
  189. lwi r11, r0, PER_CPU(KM) /* load mode indicator */
  190. beqid r11, 1f /* Already in kernel mode? */
  191. nop
  192. brid 2f /* jump over */
  193. addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */
  194. 1: /* Switch to kernel stack */
  195. lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
  196. lwi r1, r1, TS_THREAD_INFO /* get the thread info */
  197. /* calculate kernel stack pointer */
  198. addik r1, r1, THREAD_SIZE - PT_SIZE
  199. 2:
  200. swi r11, r1, PT_MODE /* store the mode */
  201. lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
  202. /* save them on stack */
  203. swi r2, r1, PT_R2
  204. swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
  205. swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
  206. swi r5, r1, PT_R5
  207. swi r6, r1, PT_R6
  208. swi r7, r1, PT_R7
  209. swi r8, r1, PT_R8
  210. swi r9, r1, PT_R9
  211. swi r10, r1, PT_R10
  212. swi r11, r1, PT_R11
  213. /* r12: _always_ in clobber list; see unistd.h */
  214. swi r12, r1, PT_R12
  215. swi r13, r1, PT_R13
  216. /* r14: _always_ in clobber list; see unistd.h */
  217. swi r14, r1, PT_R14
  218. /* but we want to return to the next inst. */
  219. addik r14, r14, 0x4
  220. swi r14, r1, PT_PC /* increment by 4 and store in pc */
  221. swi r15, r1, PT_R15
  222. swi r16, r1, PT_R16
  223. swi r17, r1, PT_R17
  224. swi r18, r1, PT_R18
  225. swi r19, r1, PT_R19
  226. swi r20, r1, PT_R20
  227. swi r21, r1, PT_R21
  228. swi r22, r1, PT_R22
  229. swi r23, r1, PT_R23
  230. swi r24, r1, PT_R24
  231. swi r25, r1, PT_R25
  232. swi r26, r1, PT_R26
  233. swi r27, r1, PT_R27
  234. swi r28, r1, PT_R28
  235. swi r29, r1, PT_R29
  236. swi r30, r1, PT_R30
  237. swi r31, r1, PT_R31
  238. disable_irq
  239. nop /* make sure IE bit is in effect */
  240. clear_bip /* once IE is in effect it is safe to clear BIP */
  241. nop
  242. /* special purpose registers */
  243. mfs r11, rmsr
  244. swi r11, r1, PT_MSR
  245. mfs r11, rear
  246. swi r11, r1, PT_EAR
  247. mfs r11, resr
  248. swi r11, r1, PT_ESR
  249. mfs r11, rfsr
  250. swi r11, r1, PT_FSR
  251. /* reload original stack pointer and save it */
  252. lwi r11, r0, PER_CPU(ENTRY_SP)
  253. swi r11, r1, PT_R1
  254. /* update mode indicator we are in kernel mode */
  255. addik r11, r0, 1
  256. swi r11, r0, PER_CPU(KM)
  257. /* restore r31 */
  258. lwi r31, r0, PER_CPU(CURRENT_SAVE)
  259. /* re-enable interrupts now we are in kernel mode */
  260. enable_irq
  261. /* See if the system call number is valid. */
  262. addi r11, r12, -__NR_syscalls
  263. bgei r11, 1f /* return to user if not valid */
  264. /* Figure out which function to use for this system call. */
  265. /* Note Microblaze barrel shift is optional, so don't rely on it */
  266. add r12, r12, r12 /* convert num -> ptr */
  267. addik r30, r0, 1 /* restarts allowed */
  268. add r12, r12, r12
  269. lwi r12, r12, sys_call_table /* Get function pointer */
  270. addik r15, r0, ret_to_user-8 /* set return address */
  271. bra r12 /* Make the system call. */
  272. bri 0 /* won't reach here */
  273. 1:
  274. brid ret_to_user /* jump to syscall epilogue */
  275. addi r3, r0, -ENOSYS /* set errno in delay slot */
  276. /*
  277. * Debug traps are like a system call, but entered via brki r14, 0x60
  278. * All we need to do is send the SIGTRAP signal to current, ptrace and
  279. * do_notify_resume will handle the rest
  280. */
  281. ENTRY(_debug_exception)
  282. swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
  283. lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
  284. lwi r1, r1, TS_THREAD_INFO /* get the thread info */
  285. addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */
  286. swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
  287. lwi r11, r0, PER_CPU(KM) /* load mode indicator */
  288. //save_context:
  289. swi r11, r1, PT_MODE /* store the mode */
  290. lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
  291. /* save them on stack */
  292. swi r2, r1, PT_R2
  293. swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
  294. swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
  295. swi r5, r1, PT_R5
  296. swi r6, r1, PT_R6
  297. swi r7, r1, PT_R7
  298. swi r8, r1, PT_R8
  299. swi r9, r1, PT_R9
  300. swi r10, r1, PT_R10
  301. swi r11, r1, PT_R11
  302. /* r12: _always_ in clobber list; see unistd.h */
  303. swi r12, r1, PT_R12
  304. swi r13, r1, PT_R13
  305. /* r14: _always_ in clobber list; see unistd.h */
  306. swi r14, r1, PT_R14
  307. swi r14, r1, PT_PC /* Will return to interrupted instruction */
  308. swi r15, r1, PT_R15
  309. swi r16, r1, PT_R16
  310. swi r17, r1, PT_R17
  311. swi r18, r1, PT_R18
  312. swi r19, r1, PT_R19
  313. swi r20, r1, PT_R20
  314. swi r21, r1, PT_R21
  315. swi r22, r1, PT_R22
  316. swi r23, r1, PT_R23
  317. swi r24, r1, PT_R24
  318. swi r25, r1, PT_R25
  319. swi r26, r1, PT_R26
  320. swi r27, r1, PT_R27
  321. swi r28, r1, PT_R28
  322. swi r29, r1, PT_R29
  323. swi r30, r1, PT_R30
  324. swi r31, r1, PT_R31
  325. disable_irq
  326. nop /* make sure IE bit is in effect */
  327. clear_bip /* once IE is in effect it is safe to clear BIP */
  328. nop
  329. /* special purpose registers */
  330. mfs r11, rmsr
  331. swi r11, r1, PT_MSR
  332. mfs r11, rear
  333. swi r11, r1, PT_EAR
  334. mfs r11, resr
  335. swi r11, r1, PT_ESR
  336. mfs r11, rfsr
  337. swi r11, r1, PT_FSR
  338. /* reload original stack pointer and save it */
  339. lwi r11, r0, PER_CPU(ENTRY_SP)
  340. swi r11, r1, PT_R1
  341. /* update mode indicator we are in kernel mode */
  342. addik r11, r0, 1
  343. swi r11, r0, PER_CPU(KM)
  344. /* restore r31 */
  345. lwi r31, r0, PER_CPU(CURRENT_SAVE)
  346. /* re-enable interrupts now we are in kernel mode */
  347. enable_irq
  348. addi r5, r0, SIGTRAP /* sending the trap signal */
  349. add r6, r0, r31 /* to current */
  350. bralid r15, send_sig
  351. add r7, r0, r0 /* 3rd param zero */
  352. addik r30, r0, 1 /* restarts allowed ??? */
  353. /* Restore r3/r4 to work around how ret_to_user works */
  354. lwi r3, r1, PT_R3
  355. lwi r4, r1, PT_R4
  356. bri ret_to_user
  357. ENTRY(_break)
  358. bri 0
  359. /* struct task_struct *_switch_to(struct thread_info *prev,
  360. struct thread_info *next); */
  361. ENTRY(_switch_to)
  362. /* prepare return value */
  363. addk r3, r0, r31
  364. /* save registers in cpu_context */
  365. /* use r11 and r12, volatile registers, as temp register */
  366. addik r11, r5, TI_CPU_CONTEXT
  367. swi r1, r11, CC_R1
  368. swi r2, r11, CC_R2
  369. /* skip volatile registers.
  370. * they are saved on stack when we jumped to _switch_to() */
  371. /* dedicated registers */
  372. swi r13, r11, CC_R13
  373. swi r14, r11, CC_R14
  374. swi r15, r11, CC_R15
  375. swi r16, r11, CC_R16
  376. swi r17, r11, CC_R17
  377. swi r18, r11, CC_R18
  378. /* save non-volatile registers */
  379. swi r19, r11, CC_R19
  380. swi r20, r11, CC_R20
  381. swi r21, r11, CC_R21
  382. swi r22, r11, CC_R22
  383. swi r23, r11, CC_R23
  384. swi r24, r11, CC_R24
  385. swi r25, r11, CC_R25
  386. swi r26, r11, CC_R26
  387. swi r27, r11, CC_R27
  388. swi r28, r11, CC_R28
  389. swi r29, r11, CC_R29
  390. swi r30, r11, CC_R30
  391. /* special purpose registers */
  392. mfs r12, rmsr
  393. swi r12, r11, CC_MSR
  394. mfs r12, rear
  395. swi r12, r11, CC_EAR
  396. mfs r12, resr
  397. swi r12, r11, CC_ESR
  398. mfs r12, rfsr
  399. swi r12, r11, CC_FSR
  400. /* update r31, the current */
  401. lwi r31, r6, TI_TASK
  402. swi r31, r0, PER_CPU(CURRENT_SAVE)
  403. /* get new process' cpu context and restore */
  404. addik r11, r6, TI_CPU_CONTEXT
  405. /* special purpose registers */
  406. lwi r12, r11, CC_FSR
  407. mts rfsr, r12
  408. lwi r12, r11, CC_ESR
  409. mts resr, r12
  410. lwi r12, r11, CC_EAR
  411. mts rear, r12
  412. lwi r12, r11, CC_MSR
  413. mts rmsr, r12
  414. /* non-volatile registers */
  415. lwi r30, r11, CC_R30
  416. lwi r29, r11, CC_R29
  417. lwi r28, r11, CC_R28
  418. lwi r27, r11, CC_R27
  419. lwi r26, r11, CC_R26
  420. lwi r25, r11, CC_R25
  421. lwi r24, r11, CC_R24
  422. lwi r23, r11, CC_R23
  423. lwi r22, r11, CC_R22
  424. lwi r21, r11, CC_R21
  425. lwi r20, r11, CC_R20
  426. lwi r19, r11, CC_R19
  427. /* dedicated registers */
  428. lwi r18, r11, CC_R18
  429. lwi r17, r11, CC_R17
  430. lwi r16, r11, CC_R16
  431. lwi r15, r11, CC_R15
  432. lwi r14, r11, CC_R14
  433. lwi r13, r11, CC_R13
  434. /* skip volatile registers */
  435. lwi r2, r11, CC_R2
  436. lwi r1, r11, CC_R1
  437. rtsd r15, 8
  438. nop
  439. ENTRY(ret_from_fork)
  440. addk r5, r0, r3
  441. brlid r15, schedule_tail
  442. nop
  443. swi r31, r1, PT_R31 /* save r31 in user context. */
  444. /* will soon be restored to r31 in ret_to_user */
  445. addk r3, r0, r0
  446. brid ret_to_user
  447. nop
  448. ENTRY(ret_from_kernel_thread)
  449. brlid r15, schedule_tail
  450. addk r5, r0, r3
  451. brald r15, r20
  452. addk r5, r0, r19
  453. brid ret_to_user
  454. addk r3, r0, r0
  455. work_pending:
  456. lwi r11, r1, PT_MODE
  457. bneid r11, 2f
  458. 3:
  459. enable_irq
  460. andi r11, r19, _TIF_NEED_RESCHED
  461. beqi r11, 1f
  462. bralid r15, schedule
  463. nop
  464. bri 4f
  465. 1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
  466. beqi r11, no_work_pending
  467. addk r5, r30, r0
  468. bralid r15, do_notify_resume
  469. addik r6, r0, 1
  470. addk r30, r0, r0 /* no restarts from now on */
  471. 4:
  472. disable_irq
  473. lwi r6, r31, TS_THREAD_INFO /* get thread info */
  474. lwi r19, r6, TI_FLAGS /* get flags in thread info */
  475. bri 3b
  476. ENTRY(ret_to_user)
  477. disable_irq
  478. swi r4, r1, PT_R4 /* return val */
  479. swi r3, r1, PT_R3 /* return val */
  480. lwi r6, r31, TS_THREAD_INFO /* get thread info */
  481. lwi r19, r6, TI_FLAGS /* get flags in thread info */
  482. bnei r19, work_pending /* do an extra work if any bits are set */
  483. no_work_pending:
  484. disable_irq
  485. 2:
  486. /* save r31 */
  487. swi r31, r0, PER_CPU(CURRENT_SAVE)
  488. /* save mode indicator */
  489. lwi r18, r1, PT_MODE
  490. swi r18, r0, PER_CPU(KM)
  491. //restore_context:
  492. /* special purpose registers */
  493. lwi r18, r1, PT_FSR
  494. mts rfsr, r18
  495. lwi r18, r1, PT_ESR
  496. mts resr, r18
  497. lwi r18, r1, PT_EAR
  498. mts rear, r18
  499. lwi r18, r1, PT_MSR
  500. mts rmsr, r18
  501. lwi r31, r1, PT_R31
  502. lwi r30, r1, PT_R30
  503. lwi r29, r1, PT_R29
  504. lwi r28, r1, PT_R28
  505. lwi r27, r1, PT_R27
  506. lwi r26, r1, PT_R26
  507. lwi r25, r1, PT_R25
  508. lwi r24, r1, PT_R24
  509. lwi r23, r1, PT_R23
  510. lwi r22, r1, PT_R22
  511. lwi r21, r1, PT_R21
  512. lwi r20, r1, PT_R20
  513. lwi r19, r1, PT_R19
  514. lwi r18, r1, PT_R18
  515. lwi r17, r1, PT_R17
  516. lwi r16, r1, PT_R16
  517. lwi r15, r1, PT_R15
  518. lwi r14, r1, PT_PC
  519. lwi r13, r1, PT_R13
  520. lwi r12, r1, PT_R12
  521. lwi r11, r1, PT_R11
  522. lwi r10, r1, PT_R10
  523. lwi r9, r1, PT_R9
  524. lwi r8, r1, PT_R8
  525. lwi r7, r1, PT_R7
  526. lwi r6, r1, PT_R6
  527. lwi r5, r1, PT_R5
  528. lwi r4, r1, PT_R4 /* return val */
  529. lwi r3, r1, PT_R3 /* return val */
  530. lwi r2, r1, PT_R2
  531. lwi r1, r1, PT_R1
  532. rtid r14, 0
  533. nop
  534. sys_rt_sigreturn_wrapper:
  535. addk r30, r0, r0 /* no restarts for this one */
  536. brid sys_rt_sigreturn
  537. addk r5, r1, r0
  538. /* Interrupt vector table */
  539. .section .init.ivt, "ax"
  540. .org 0x0
  541. brai _reset
  542. brai _user_exception
  543. brai _interrupt
  544. brai _break
  545. brai _hw_exception_handler
  546. .org 0x60
  547. brai _debug_exception
  548. .section .rodata,"a"
  549. #include "syscall_table.S"
  550. syscall_table_size=(.-sys_call_table)
  551. type_SYSCALL:
  552. .ascii "SYSCALL\0"
  553. type_IRQ:
  554. .ascii "IRQ\0"
  555. type_IRQ_PREEMPT:
  556. .ascii "IRQ (PREEMPTED)\0"
  557. type_SYSCALL_PREEMPT:
  558. .ascii " SYSCALL (PREEMPTED)\0"
  559. /*
  560. * Trap decoding for stack unwinder
  561. * Tuples are (start addr, end addr, string)
  562. * If return address lies on [start addr, end addr],
  563. * unwinder displays 'string'
  564. */
  565. .align 4
  566. .global microblaze_trap_handlers
  567. microblaze_trap_handlers:
  568. /* Exact matches come first */
  569. .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
  570. .word ret_from_intr; .word ret_from_intr ; .word type_IRQ
  571. /* Fuzzy matches go here */
  572. .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
  573. .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
  574. /* End of table */
  575. .word 0 ; .word 0 ; .word 0