/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *               D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *               Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */
#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception
#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4	/* just in case */
/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.
 */
__common_int_entry:
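	/* The per-IVG stubs (INTERRUPT_ENTRY in asm/entry.h) have already
	 * stacked SYSCFG, orig_p0, orig_r0 and R7:0/P5:0; finish building
	 * the struct pt_regs frame with the frame/stack pointers, DAG,
	 * loop, accumulator and sequencer state below.
	 */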
	[--sp] = fp;
	[--sp] = usp;

	[--sp] = i0;
	[--sp] = i1;
	[--sp] = i2;
	[--sp] = i3;

	[--sp] = m0;
	[--sp] = m1;
	[--sp] = m2;
	[--sp] = m3;

	[--sp] = l0;
	[--sp] = l1;
	[--sp] = l2;
	[--sp] = l3;

	[--sp] = b0;
	[--sp] = b1;
	[--sp] = b2;
	[--sp] = b3;
	[--sp] = a0.x;
	[--sp] = a0.w;
	[--sp] = a1.x;
	[--sp] = a1.w;

	[--sp] = LC0;
	[--sp] = LC1;
	[--sp] = LT0;
	[--sp] = LT1;
	[--sp] = LB0;
	[--sp] = LB1;

	[--sp] = ASTAT;

	[--sp] = r0;	/* Skip reserved */
	[--sp] = RETS;
	r2 = RETI;
	[--sp] = r2;
	[--sp] = RETX;
	[--sp] = RETN;
	[--sp] = RETE;
	[--sp] = SEQSTAT;
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */
	/* Switch to other method of keeping interrupts disabled.  */
#ifdef CONFIG_DEBUG_HWERR
	r1 = 0x3f;
	sti r1;
#else
	cli r1;
#endif
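	/* With CONFIG_DEBUG_HWERR, IMASK is set to 0x3f so the hardware
	 * error event (bit 5, IVHW) stays enabled along with the
	 * non-maskable events while all lower priority interrupts remain
	 * masked; plain CLI would mask IVHW as well.
	 */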
#ifdef CONFIG_TRACE_IRQFLAGS
	/* r0 (the IRQ number) is live across the call, so preserve it. */
	[--sp] = r0;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	r0 = [sp++];
#endif
	[--sp] = RETI;	/* orig_pc */
	/* Clear all L registers - kernel C code assumes linear
	 * (non-circular) DAG addressing.
	 */
	r1 = 0 (x);
	l0 = r1;
	l1 = r1;
	l2 = r1;
	l3 = r1;
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
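	/* Dummy killed MMR read; works around anomalies 05000283/05000315
	 * and clobbers p5/r7 as scratch.
	 */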
	ANOMALY_283_315_WORKAROUND(p5, r7)
	r1 = sp;	/* stack frame pt_regs pointer argument ==> r1 */
	SP += -12;
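	/* r0 already holds the IRQ number; the 12 byte adjustment reserves
	 * the callee's C ABI outgoing argument area.
	 */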
#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq
	SP += 12;
	cc = r0 == 0;
	if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
#ifdef CONFIG_PREEMPT
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;
	p5 = r7;
	r7 = [p5 + TI_PREEMPT];	/* get preempt count */
	r7 += 1;		/* increment it */
	[p5 + TI_PREEMPT] = r7;
#endif
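	/* Masking SP with ALIGN_PAGE_MASK above recovers the thread_info
	 * that sits at the base of the kernel stack; bumping TI_PREEMPT
	 * keeps the task from being preempted while _do_irq runs.
	 */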
	pseudo_long_call _do_irq, p2;
#ifdef CONFIG_PREEMPT
	r7 += -1;
	[p5 + TI_PREEMPT] = r7;	/* restore preempt count */
#endif
	SP += 12;
#endif /* CONFIG_IPIPE */
	pseudo_long_call _return_from_int, p2;
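	/* _return_from_int (entry.S) handles any pending work (reschedule,
	 * signal delivery) before we fall through to the context restore.
	 */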
.Lcommon_restore_context:
	RESTORE_CONTEXT
	rti;
/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
	/* In case a single action kicks off multiple memory transactions
	 * (like a cache line fetch) - this can cause multiple hardware
	 * errors, so let's catch them all. First, make sure all the
	 * actions are complete, and the core sees the hardware errors.
	 */
	SSYNC;
	SSYNC;

	SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)
	/* Handle all stacked hardware errors.
	 * To make sure we don't hang forever, only do it 10 times.
	 */
	R0 = 0;		/* loop counter */
	R2 = 10;	/* loop bound */
1:
	P0.L = LO(ILAT);
	P0.H = HI(ILAT);
	R1 = [P0];
	CC = BITTST(R1, EVT_IVHW_P);
	IF !CC JUMP 2f;
	/* OK a hardware error is pending - clear it by writing the
	 * EVT_IVHW mask (not the EVT_IVHW_P bit position) to ILAT.
	 */
	R1 = EVT_IVHW;
	[P0] = R1;
	R0 += 1;
	CC = R0 == R2;	/* compare the loop counter, not the mask */
	if CC JUMP 2f;
	JUMP 1b;
2:
	# We are going to dump something out, so make sure we print IPEND properly
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	[sp + PT_IPEND] = r0;
	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p5;
	SP += 12;
#ifdef EBIU_ERRMST
	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;
#endif
	pseudo_long_call _ret_from_exception, p2;

.Lcommon_restore_all_sys:
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_ivhw)
/* Interrupt routine for evt2 (NMI).
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
#ifndef CONFIG_NMI_WATCHDOG
.weak _evt_nmi
#else
	/* CPLBs are not taken into account here, as this handler
	 * does not return.
	 */
	SAVE_ALL_SYS
	r0 = sp;
	r1 = retn;
	[sp + PT_PC] = r1;
	trace_buffer_save(p4, r5);

	ANOMALY_283_315_WORKAROUND(p4, r5)

	SP += -12;
	call _do_nmi;
	SP += 12;
1:
	jump 1b;
#endif
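	/* Without CONFIG_NMI_WATCHDOG, _evt_nmi is a weak stub that simply
	 * returns; with it, _do_nmi() never returns and we spin at 1b.
	 */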
	rtn;
ENDPROC(_evt_nmi)
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
	INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
	INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
	INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
	INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
	INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
	INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
	INTERRUPT_ENTRY(EVT_IVG13_P)
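/* Each INTERRUPT_ENTRY stub (see asm/entry.h) saves the scratch register
 * state, loads R0 with its event number (and R1 with IPEND when the
 * handler needs it), then jumps to __common_int_entry above.
 */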
/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
	SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	pseudo_long_call _system_call, p2;
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
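/* System calls run at EVT15, the lowest priority event level, so they
 * can be preempted by any hardware interrupt; the return path is shared
 * with .Lcommon_restore_context since both unwind via RTI.
 */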
#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this feature on two occasions:
 *
 * - before branching to __ipipe_irq_tail_hook as requested by a high
 *   priority domain (e.g. Xenomai) after the pipeline delivered an
 *   interrupt, in order to start its rescheduling procedure, since we
 *   may not switch tasks when IRQ levels are nested on the Blackfin,
 *   so we have to fake an interrupt return so that we may reschedule
 *   immediately.
 *
 * - before branching to __ipipe_sync_root(), in order to play any interrupt
 *   pending for the root domain (i.e. the Linux kernel). This lowers
 *   the core priority level enough so that Linux IRQ handlers may
 *   never delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead. This is a substitute
 *   for using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
	p0 = r0;	/* preserve the routine address */
	r0.l = 1f;
	r0.h = 1f;
	reti = r0;
	rti;
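	/* Loading RETI with the address of 1f and executing RTI returns
	 * from the current event, resuming below with the core priority
	 * dropped to EVT15.
	 */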
1:
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	sp += -12;
	call (p0);
	sp += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];
#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;	/* Branches to _evt_evt14 */
2:
	jump 2b;	/* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */