/* $OpenBSD: int.c,v 1.11 2014/09/30 06:51:58 jmatthew Exp $ */
/* $NetBSD: int.c,v 1.24 2011/07/01 18:53:46 dyoung Exp $ */

/*
 * Copyright (c) 2009 Stephen M. Rumble
 * Copyright (c) 2004 Christopher SEKIYA
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * INT2 (IP20, IP22) / INT3 (IP24) interrupt controllers
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <mips64/archtype.h>
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <mips64/mips_cpu.h>
#include <machine/intr.h>

#include <dev/ic/i8253reg.h>

#include <sgi/localbus/intreg.h>
#include <sgi/localbus/intvar.h>
#include <sgi/sgi/ip22.h>

int     int2_match(struct device *, void *, void *);
void    int2_attach(struct device *, struct device *, void *);
int     int2_mappable_intr(void *);

const struct cfattach int_ca = {
        sizeof(struct device), int2_match, int2_attach
};

struct cfdriver int_cd = {
        NULL, "int", DV_DULL
};

paddr_t int2_base;
paddr_t int2_get_base(void);
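
/*
 * INT2 register accessors: int2_base is an uncached XKPHYS mapping of the
 * controller, set up by int2_get_base().
 */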
#define int2_read(r)     *(volatile uint8_t *)(int2_base + (r))
#define int2_write(r, v) *(volatile uint8_t *)(int2_base + (r)) = (v)

void    int_8254_cal(void);
void    int_8254_startclock(struct cpu_info *);
uint32_t int_8254_intr0(uint32_t, struct trap_frame *);

/*
 * INT2 interrupt handling declarations: 16 local sources on 2 levels.
 *
 * In addition to this, INT3 provides 8 so-called mappable interrupts, which
 * are cascaded to either one of the two unused INT2 VME interrupts.
 * To make things easier from a software viewpoint, we pretend there are
 * 16 of them - one set of 8 per cascaded interrupt. This allows for
 * faster recognition of where to connect these interrupts - as long as
 * interrupt vector assignment makes sure no mappable interrupt is
 * registered on both cascaded interrupts.
 */

struct int2_intrhand {
        struct intrhand  ih;
        uint32_t         flags;
#define IH_FL_DISABLED  0x01
};

#define INT2_NINTS      (8 + 8 + 2 * 8)
struct int2_intrhand *int2_intrhand[INT2_NINTS];
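
/*
 * int2_intem has one bit set per interrupt source with a registered handler
 * (cleared again by int2_intr_disable() when a source's only handler is
 * disabled); int2_l0imask/int2_l1imask hold, for each IPL, the local 0/1
 * sources that must remain masked at that level.
 */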
uint32_t int2_intem;
uint8_t  int2_l0imask[NIPLS], int2_l1imask[NIPLS];

void    int2_splx(int);

uint32_t int2_l0intr(uint32_t, struct trap_frame *);
void    int2_l0makemasks(void);
uint32_t int2_l1intr(uint32_t, struct trap_frame *);
void    int2_l1makemasks(void);

/*
 * Level 0 interrupt handler.
 */

uint32_t save_l0imr, save_l0isr, save_l0ipl;

#define INTR_FUNCTIONNAME       int2_l0intr
#define MASK_FUNCTIONNAME       int2_l0makemasks
#define INTR_LOCAL_DECLS
#define MASK_LOCAL_DECLS
#define INTR_GETMASKS \
do { \
        isr = int2_read(INT2_LOCAL0_STATUS); \
        imr = int2_read(INT2_LOCAL0_MASK); \
        bit = 7; \
        save_l0isr = isr; save_l0imr = imr; save_l0ipl = frame->ipl; \
} while (0)
#define INTR_MASKPENDING \
        int2_write(INT2_LOCAL0_MASK, imr & ~isr)
#define INTR_IMASK(ipl)         int2_l0imask[ipl]
#define INTR_HANDLER(bit)       (struct intrhand *)int2_intrhand[bit + 0]
#define INTR_SPURIOUS(bit) \
do { \
        printf("spurious int2 interrupt %d\n", bit); \
} while (0)
/* explicit masking with int2_intem to cope with handlers disabling themselves */
#define INTR_MASKRESTORE \
        int2_write(INT2_LOCAL0_MASK, int2_intem & imr)
#define INTR_MASKSIZE   8
#define INTR_HANDLER_SKIP(ih) \
        (((struct int2_intrhand *)(ih))->flags /* & IH_FL_DISABLED */)

#include <sgi/sgi/intr_template.c>

/*
 * Level 1 interrupt handler.
 */

uint32_t save_l1imr, save_l1isr, save_l1ipl;

#define INTR_FUNCTIONNAME       int2_l1intr
#define MASK_FUNCTIONNAME       int2_l1makemasks
#define INTR_LOCAL_DECLS
#define MASK_LOCAL_DECLS
#define INTR_GETMASKS \
do { \
        isr = int2_read(INT2_LOCAL1_STATUS); \
        imr = int2_read(INT2_LOCAL1_MASK); \
        bit = 7; \
        save_l1isr = isr; save_l1imr = imr; save_l1ipl = frame->ipl; \
} while (0)
#define INTR_MASKPENDING \
        int2_write(INT2_LOCAL1_MASK, imr & ~isr)
#define INTR_IMASK(ipl)         int2_l1imask[ipl]
#define INTR_HANDLER(bit)       (struct intrhand *)int2_intrhand[bit + 8]
#define INTR_SPURIOUS(bit) \
do { \
        printf("spurious int2 interrupt %d\n", bit + 8); \
} while (0)
/* explicit masking with int2_intem to cope with handlers disabling themselves */
#define INTR_MASKRESTORE \
        int2_write(INT2_LOCAL1_MASK, (int2_intem >> 8) & imr)
#define INTR_MASKSIZE   8
#define INTR_HANDLER_SKIP(ih) \
        (((struct int2_intrhand *)(ih))->flags /* & IH_FL_DISABLED */)

#include <sgi/sgi/intr_template.c>
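
/*
 * Register an interrupt handler for the given INT2/INT3 source and unmask
 * it in hardware: sources 0-7 are local 0, 8-15 are local 1, 16-31 are the
 * two banks of mappable interrupts.
 */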
void *
int2_intr_establish(int irq, int level, int (*ih_fun) (void *),
    void *ih_arg, const char *ih_what)
{
        struct int2_intrhand **p, *q, *ih;
        int s;

#ifdef DIAGNOSTIC
        if (irq < 0 || irq >= INT2_NINTS)
                panic("int2_intr_establish: illegal irq %d", irq);
        /* Mappable interrupts can't be above IPL_TTY */
        if ((irq >> 3) >= 2 && level > IPL_TTY)
                return NULL;
#endif

        ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
        if (ih == NULL)
                return NULL;

        ih->ih.ih_next = NULL;
        ih->ih.ih_fun = ih_fun;
        ih->ih.ih_arg = ih_arg;
        ih->ih.ih_level = level;
        ih->ih.ih_irq = irq;
        if (ih_what != NULL)
                evcount_attach(&ih->ih.ih_count, ih_what, &ih->ih.ih_irq);
        ih->flags = 0;

        s = splhigh();

        for (p = &int2_intrhand[irq]; (q = *p) != NULL;
            p = (struct int2_intrhand **)&q->ih.ih_next)
                continue;
        *p = ih;

        int2_intem |= 1 << irq;
        switch (irq >> 3) {
        case 0:
                int2_l0makemasks();
                break;
        case 1:
                int2_l1makemasks();
                break;
        /*
         * We do not maintain masks for mappable interrupts. They are
         * masked as a whole, by the level 0 or 1 interrupt they cascade to.
         */
        case 2:
                int2_write(INT2_IP22_MAP_MASK0,
                    int2_read(INT2_IP22_MAP_MASK0) | (1 << (irq & 7)));
                break;
        case 3:
                int2_write(INT2_IP22_MAP_MASK1,
                    int2_read(INT2_IP22_MAP_MASK1) | (1 << (irq & 7)));
                break;
        }

        splx(s);        /* will cause hardware mask update */

        return ih;
}
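
/*
 * splx(9) back-end: record the new IPL, reprogram both local interrupt
 * mask registers accordingly, then dispatch any pending soft interrupts.
 */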
void
int2_splx(int newipl)
{
        struct cpu_info *ci = curcpu();
        register_t sr;

        __asm__ (".set noreorder");
        ci->ci_ipl = newipl;
        mips_sync();
        __asm__ (".set reorder\n");

        sr = disableintr();     /* XXX overkill? */
        int2_write(INT2_LOCAL1_MASK, (int2_intem >> 8) & ~int2_l1imask[newipl]);
        int2_write(INT2_LOCAL0_MASK, int2_intem & ~int2_l0imask[newipl]);
        setsr(sr);

        if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
                setsoftintr0();
}

/*
 * Mappable interrupts handler.
 */
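/*
 * arg encodes which of the two cascade interrupts (0 or 1) this dispatcher
 * instance was established for in int2_attach().
 */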
int
int2_mappable_intr(void *arg)
{
        uint which = (unsigned long)arg;
        vaddr_t imrreg;
        uint64_t imr, isr;
        uint i, intnum;
        struct int2_intrhand *ih;
        int rc, ret;

        imrreg = which == 0 ? INT2_IP22_MAP_MASK0 : INT2_IP22_MAP_MASK1;
        isr = int2_read(INT2_IP22_MAP_STATUS);
        imr = int2_read(imrreg);

        isr &= imr;
        if (isr == 0)
                return 0;       /* not for us */

        /*
         * Don't bother masking sources here - all mappable interrupts are
         * tied to either a level 1 or level 0 interrupt, and the dispatcher
         * is registered at IPL_TTY, so we can safely assume we are running
         * at IPL_TTY now.
         */
        for (i = 0; i < 8; i++) {
                intnum = i + 16 + (which << 3);
                if (isr & (1 << i)) {
                        rc = 0;
                        for (ih = int2_intrhand[intnum]; ih != NULL;
                            ih = (struct int2_intrhand *)ih->ih.ih_next) {
                                if (ih->flags /* & IH_FL_DISABLED */)
                                        continue;
                                ret = (*ih->ih.ih_fun)(ih->ih.ih_arg);
                                if (ret != 0) {
                                        rc = 1;
                                        atomic_inc_long((unsigned long *)
                                            &ih->ih.ih_count.ec_count);
                                }
                                if (ret == 1)
                                        break;
                        }
                        if (rc == 0)
                                printf("spurious int2 mapped interrupt %d\n",
                                    i);
                }
        }

        return 1;
}

int
int2_match(struct device *parent, void *match, void *aux)
{
        struct mainbus_attach_args *maa = (void *)aux;

        switch (sys_config.system_type) {
        case SGI_IP20:
        case SGI_IP22:
        case SGI_IP26:
        case SGI_IP28:
                break;
        default:
                return 0;
        }

        return !strcmp(maa->maa_name, int_cd.cd_name);
}

void
int2_attach(struct device *parent, struct device *self, void *aux)
{
        if (int2_base == 0)
                int2_base = int2_get_base();

        printf(" addr 0x%lx\n", XKPHYS_TO_PHYS(int2_base));

        /* Clean out interrupt masks */
        int2_write(INT2_LOCAL0_MASK, 0);
        int2_write(INT2_LOCAL1_MASK, 0);
        int2_write(INT2_IP22_MAP_MASK0, 0);
        int2_write(INT2_IP22_MAP_MASK1, 0);

        /* Reset timer interrupts */
        int2_write(INT2_TIMER_CONTROL,
            TIMER_SEL0 | TIMER_16BIT | TIMER_SWSTROBE);
        int2_write(INT2_TIMER_CONTROL,
            TIMER_SEL1 | TIMER_16BIT | TIMER_SWSTROBE);
        int2_write(INT2_TIMER_CONTROL,
            TIMER_SEL2 | TIMER_16BIT | TIMER_SWSTROBE);
        mips_sync();
        delay(4);
        int2_write(INT2_TIMER_CLEAR, 0x03);

        set_intr(INTPRI_L1, CR_INT_1, int2_l1intr);
        set_intr(INTPRI_L0, CR_INT_0, int2_l0intr);
        register_splx_handler(int2_splx);

        if (sys_config.system_type != SGI_IP20) {
                /* Wire mappable interrupt handlers */
                int2_intr_establish(INT2_L0_INTR(INT2_L0_IP22_MAP0), IPL_TTY,
                    int2_mappable_intr, (void *)0, NULL);
                int2_intr_establish(INT2_L1_INTR(INT2_L1_IP22_MAP1), IPL_TTY,
                    int2_mappable_intr, (void *)1, NULL);
        }

        /*
         * The 8254 timer does not interrupt on (some?) IP24 systems.
         */
        if (sys_config.system_type == SGI_IP20 ||
            sys_config.system_subtype == IP22_INDIGO2)
                int_8254_cal();
}

paddr_t
int2_get_base(void)
{
        uint32_t address;

        switch (sys_config.system_type) {
        case SGI_IP20:
                address = INT2_IP20;
                break;
        default:
        case SGI_IP22:
        case SGI_IP26:
        case SGI_IP28:
                if (sys_config.system_subtype == IP22_INDIGO2)
                        address = INT2_IP22;
                else
                        address = INT2_IP24;
                break;
        }

        return PHYS_TO_XKPHYS((uint64_t)address, CCA_NC);
}

/*
 * Returns nonzero if the given interrupt source is pending.
 */
int
int2_is_intr_pending(int irq)
{
        paddr_t reg;

        if (int2_base == 0)
                int2_base = int2_get_base();

        switch (irq >> 3) {
        case 0:
                reg = INT2_LOCAL0_STATUS;
                break;
        case 1:
                reg = INT2_LOCAL1_STATUS;
                break;
        case 2:
        case 3:
                reg = INT2_IP22_MAP_STATUS;
                break;
        default:
                return 0;
        }

        return int2_read(reg) & (1 << (irq & 7));
}

/*
 * Temporarily disable an interrupt handler. Note that disable/enable
 * calls can not be stacked.
 *
 * The interrupt source will become masked if this is its only handler.
 * (This is intended for panel(4), which is not supposed to be a shared
 * interrupt.)
 */
void
int2_intr_disable(void *v)
{
        struct int2_intrhand *ih = (struct int2_intrhand *)v;
        int s;

        s = splhigh();
        if ((ih->flags & IH_FL_DISABLED) == 0) {
                ih->flags |= IH_FL_DISABLED;
                if (ih == int2_intrhand[ih->ih.ih_irq] &&
                    ih->ih.ih_next == NULL) {
                        /* disable interrupt source */
                        int2_intem &= ~(1 << ih->ih.ih_irq);
                }
        }
        splx(s);
}

/*
 * Reenable an interrupt handler.
 */
void
int2_intr_enable(void *v)
{
        struct int2_intrhand *ih = (struct int2_intrhand *)v;
        int s;

        s = splhigh();
        if ((ih->flags & IH_FL_DISABLED) != 0) {
                ih->flags &= ~IH_FL_DISABLED;
                if (ih == int2_intrhand[ih->ih.ih_irq] &&
                    ih->ih.ih_next == NULL) {
                        /* reenable interrupt source */
                        int2_intem |= 1 << ih->ih.ih_irq;
                }
        }
        splx(s);
}

/*
 * A master clock is wired to TIMER_2, which in turn clocks the two other
 * timers. The master frequency is 1MHz.
 *
 * TIMER_0 and TIMER_1 interrupt on HW_INT_2 and HW_INT_3, respectively.
 *
 * NB: Apparently int2 doesn't like counting down from one, but two works.
 */

static struct evcount int_clock_count;
static int int_clock_irq = 2;

void
int_8254_cal(void)
{
        uint freq = 1000000 / 2 / hz;
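
        /*
         * TIMER_2 divides the 1MHz master clock by two (it is loaded with
         * a count of 2 below), so TIMER_0 counts at 500kHz and a divisor
         * of 500000 / hz yields hz interrupts per second.
         */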

        /* Timer0 is our hz. */
        int2_write(INT2_TIMER_CONTROL,
            TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
        int2_write(INT2_TIMER_0, freq & 0xff);
        mips_sync();
        delay(4);
        int2_write(INT2_TIMER_0, freq >> 8);

        /* Timer2 clocks timer0 and timer1. */
        int2_write(INT2_TIMER_CONTROL,
            TIMER_SEL2 | TIMER_RATEGEN | TIMER_16BIT);
        int2_write(INT2_TIMER_2, 2);
        mips_sync();
        delay(4);
        int2_write(INT2_TIMER_2, 0);

        set_intr(INTPRI_CLOCK, CR_INT_2, int_8254_intr0);
        evcount_attach(&int_clock_count, "clock", &int_clock_irq);

        md_startclock = int_8254_startclock;
}
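
/*
 * Clock interrupt handler: acknowledge TIMER_0 and, once the clock has been
 * started and unless we interrupted code running at or above IPL_CLOCK,
 * deliver all accumulated ticks to hardclock().
 */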
uint32_t
int_8254_intr0(uint32_t hwpend, struct trap_frame *tf)
{
        struct cpu_info *ci = curcpu();

        int2_write(INT2_TIMER_CLEAR, 0x01);

        ci->ci_pendingticks++;
        if (ci->ci_clock_started != 0) {
                if (tf->ipl < IPL_CLOCK) {
                        while (ci->ci_pendingticks) {
                                int_clock_count.ec_count++;
                                hardclock(tf);
                                ci->ci_pendingticks--;
                        }
                }
        }

        return hwpend;
}
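
/*
 * md_startclock hook: once this has run on a cpu, int_8254_intr0 starts
 * delivering accumulated ticks to hardclock().
 */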
void
int_8254_startclock(struct cpu_info *ci)
{
        ci->ci_clock_started++;
}