/*	$OpenBSD: i80321_intr.c,v 1.16 2014/04/03 10:17:34 mpi Exp $ */

/*
 * Copyright (c) 2006 Dale Rahn <drahn@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/evcount.h>

#include <uvm/uvm_extern.h>

#include <machine/intr.h>

#include <arm/cpufunc.h>
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/*
 * autoconf glue
 */
int i80321intc_match(struct device *, void *, void *);
void i80321intc_attach(struct device *, struct device *, void *);

/* internal functions */
static void i80321intc_write_intctl(uint32_t mask);
void i80321intc_write_steer(uint32_t mask);
uint32_t i80321intc_read_intsrc(void);
void i80321intc_calc_mask(void);
void i80321intc_init(void);
void i80321intc_intr_init(void);
static void i80321intc_setipl(int new);
void i80321intc_do_pending(void);

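/*
 * i80321intc_imask[ipl] is the hardware interrupt enable mask written to
 * INTCTL while running at that IPL; i80321intc_smask[ipl] is the set of
 * soft interrupts that may be dispatched at that IPL.
 */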
uint32_t i80321intc_imask[NIPL];
uint32_t i80321intc_smask[NIPL];

#define SI_TO_IRQBIT(x)	(1 << (x))

volatile int current_ipl_level;
volatile int softint_pending;

struct cfattach i80321intc_ca = {
	sizeof(struct device), i80321intc_match, i80321intc_attach
};

struct cfdriver i80321intc_cd = {
	NULL, "i80321intc", DV_DULL
};

int i80321intc_attached = 0;

int
i80321intc_match(struct device *parent, void *v, void *aux)
{
	if (i80321intc_attached == 0)
		return 1;

	i80321intc_attached = 1;
	return 0;
}

void
i80321intc_attach(struct device *parent, struct device *self, void *args)
{
	i80321intc_init();
}

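/*
 * The i80321 interrupt controller registers (INTCTL, INTSTR and IINTSRC)
 * are accessed through coprocessor 6 rather than memory-mapped I/O.
 */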
static inline void
i80321intc_write_intctl(uint32_t mask)
{
	__asm__ volatile ("mcr p6, 0, %0, c0, c0, 0" : : "r" (mask));
}

void
i80321intc_write_steer(uint32_t mask)
{
	__asm__ volatile ("mcr p6, 0, %0, c4, c0, 0" : : "r" (mask));
}

uint32_t
i80321intc_read_intsrc(void)
{
	uint32_t mask;

	__asm__ volatile ("mrc p6, 0, %0, c8, c0, 0" : "=r" (mask));
	return mask;
}

static inline void
i80321intc_setipl(int new)
{
	int psw;

	psw = disable_interrupts(I32_bit);
	current_ipl_level = new;
	i80321intc_write_intctl(i80321intc_imask[new]);
	restore_interrupts(psw);
}

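/* One handler queue per hardware interrupt source. */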
struct intrq i80321_handler[NIRQ];

/*
 * Recompute the irq mask bits.
 * Must be called with interrupts disabled.
 */
void
i80321intc_calc_mask(void)
{
	struct intrhand *ih;
	int irq;
	int i;

	for (irq = 0; irq < NIRQ; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;

		TAILQ_FOREACH(ih, &i80321_handler[irq].iq_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;
			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		i80321_handler[irq].iq_irq = max;

		if (max == IPL_NONE)
			min = IPL_NONE;	/* interrupt not enabled */
#if 0
		printf("irq %d: min %x max %x\n", irq, min, max);
#endif

		/* Enable interrupts at lower levels */
		for (i = 0; i < min; i++)
			i80321intc_imask[i] |= (1 << irq);

		/* Disable interrupts at upper levels */
		for (; i <= IPL_HIGH; i++)
			i80321intc_imask[i] &= ~(1 << irq);
	}

	/* initialize soft interrupt mask */
	for (i = IPL_NONE; i <= IPL_HIGH; i++) {
		i80321intc_smask[i] = 0;
		if (i < IPL_SOFT)
			i80321intc_smask[i] |= SI_TO_IRQBIT(SI_SOFT);
		if (i < IPL_SOFTCLOCK)
			i80321intc_smask[i] |= SI_TO_IRQBIT(SI_SOFTCLOCK);
		if (i < IPL_SOFTNET)
			i80321intc_smask[i] |= SI_TO_IRQBIT(SI_SOFTNET);
		if (i < IPL_SOFTTTY)
			i80321intc_smask[i] |= SI_TO_IRQBIT(SI_SOFTTTY);
#if 0
		printf("mask[%d]: %x %x\n", i, i80321intc_smask[i],
		    i80321intc_imask[i]);
#endif
	}

	i80321intc_setipl(current_ipl_level);
}

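/*
 * Run any soft interrupts that are unmasked at the current IPL,
 * highest priority first.
 */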
void
i80321intc_do_pending(void)
{
	static int processing = 0;
	int oldirqstate, spl_save;

	oldirqstate = disable_interrupts(I32_bit);

	spl_save = current_ipl_level;

	if (processing == 1) {
		restore_interrupts(oldirqstate);
		return;
	}
	processing = 1;	/* block recursive entry until we are done */

#define DO_SOFTINT(si, ipl) \
	if ((softint_pending & i80321intc_smask[current_ipl_level]) & \
	    SI_TO_IRQBIT(si)) { \
		softint_pending &= ~SI_TO_IRQBIT(si); \
		if (current_ipl_level < ipl) \
			i80321intc_setipl(ipl); \
		restore_interrupts(oldirqstate); \
		softintr_dispatch(si); \
		oldirqstate = disable_interrupts(I32_bit); \
		i80321intc_setipl(spl_save); \
	}

	do {
		DO_SOFTINT(SI_SOFTTTY, IPL_SOFTTTY);
		DO_SOFTINT(SI_SOFTNET, IPL_SOFTNET);
		DO_SOFTINT(SI_SOFTCLOCK, IPL_SOFTCLOCK);
		DO_SOFTINT(SI_SOFT, IPL_SOFT);
	} while (softint_pending & i80321intc_smask[current_ipl_level]);

	processing = 0;
	restore_interrupts(oldirqstate);
}

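/*
 * spl(9) interface: splx(), _spllower() and _splraise() map IPL changes
 * onto the INTCTL mask via i80321intc_setipl().
 */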
void
splx(int new)
{
	i80321intc_setipl(new);

	if (softint_pending & i80321intc_smask[current_ipl_level])
		i80321intc_do_pending();
}

int
_spllower(int new)
{
	int old = current_ipl_level;

	splx(new);
	return (old);
}

int
_splraise(int new)
{
	int old;

	old = current_ipl_level;

	/*
	 * setipl must always be called, even when the level does not
	 * change: there is a race window where current_ipl_level is
	 * updated before the hardware mask is written.  If an interrupt
	 * occurs in that window and the mask were not rewritten here,
	 * the hardware could be left out of sync on the next splraise,
	 * completely breaking spl protection.
	 */
	if (old > new)
		new = old;

	i80321intc_setipl(new);

	return (old);
}

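/* Mark a soft interrupt pending and dispatch it if the current IPL allows. */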
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	softint_pending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if (softint_pending & i80321intc_smask[current_ipl_level])
		i80321intc_do_pending();
}

/*
 * i80321intc_intr_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321intc_intr_init(void)
{
	i80321intc_write_intctl(0);

	i80321intc_write_steer(0);
}

/*
 * i80321intc_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321intc_init(void)
{
	struct intrq *iq;
	int i;

	for (i = 0; i < NIRQ; i++) {
		iq = &i80321_handler[i];
		TAILQ_INIT(&iq->iq_list);
	}

	i80321intc_calc_mask();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

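/*
 * Register an interrupt handler for irq to run at ipl; returns a cookie
 * for i80321_intr_disestablish().
 */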
void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg,
    const char *name)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_name = name;
	ih->ih_irq = irq;

	iq = &i80321_handler[irq];

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321intc_calc_mask();

	restore_interrupts(oldirqstate);

	return (ih);
}

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &i80321_handler[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	i80321intc_calc_mask();

	restore_interrupts(oldirqstate);
}

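/*
 * Hardware IRQ dispatch: read the pending sources, run the handlers for
 * each at the appropriate IPL, then process any soft interrupts that
 * became runnable.
 */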
void
i80321_irq_handler(void *arg)
{
	struct clockframe *frame = arg;
	uint32_t hwpend;
	int irq;
	int saved_spl_level;
	struct intrhand *ih;

	saved_spl_level = current_ipl_level;

	/* get pending IRQs */
	hwpend = i80321intc_read_intsrc();

	while ((irq = find_first_bit(hwpend)) >= 0) {
		/* XXX: Should we handle IRQs in priority order? */

		/* raise spl to block interrupts of lower priority */
		if (saved_spl_level < i80321_handler[irq].iq_irq)
			i80321intc_setipl(i80321_handler[irq].iq_irq);

		/* Enable interrupts */
		enable_interrupts(I32_bit);
		TAILQ_FOREACH(ih, &i80321_handler[irq].iq_list, ih_list) {
			if ((ih->ih_func)(ih->ih_arg == 0 ?
			    frame : ih->ih_arg))
				ih->ih_count.ec_count++;
		}
		/* Disable interrupts */
		disable_interrupts(I32_bit);

		hwpend &= ~(1 << irq);
	}

	uvmexp.intrs++;

	/* restore spl to the level in effect when this interrupt happened */
	i80321intc_setipl(saved_spl_level);

	if (softint_pending & i80321intc_smask[current_ipl_level])
		i80321intc_do_pending();
}

#ifdef DIAGNOSTIC
void
i80321_splassert_check(int wantipl, const char *func)
{
	int oldipl = current_ipl_level;

	if (oldipl < wantipl) {
		splassert_fail(wantipl, oldipl, func);
		/*
		 * If the splassert_ctl is set to not panic, raise the
		 * ipl in a feeble attempt to reduce damage.
		 */
		i80321intc_setipl(wantipl);
	}
}
#endif