i8259.c

/*
 * i8259 interrupt controller driver.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/i8259.h>
#include <asm/prom.h>

static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */

static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])

static DEFINE_RAW_SPINLOCK(i8259_lock);

static struct irq_domain *i8259_host;

/*
 * Acknowledge the IRQ using either the PCI host bridge's interrupt
 * acknowledge feature or by polling.  Which method is used is determined
 * by how i8259_init() was called.  Note that polling is broken on some
 * IBM and Motorola PReP boxes, so we must use the int-ack feature on them.
 */
unsigned int i8259_irq(void)
{
        int irq;
        int lock = 0;

        /* Either int-ack or poll for the IRQ */
        if (pci_intack)
                irq = readb(pci_intack);
        else {
                raw_spin_lock(&i8259_lock);
                lock = 1;

                /* Perform an interrupt acknowledge cycle on controller 1. */
                outb(0x0C, 0x20);       /* prepare for poll */
                irq = inb(0x20) & 7;
                if (irq == 2) {
                        /*
                         * Interrupt is cascaded so perform interrupt
                         * acknowledge on controller 2.
                         */
                        outb(0x0C, 0xA0);       /* prepare for poll */
                        irq = (inb(0xA0) & 7) + 8;
                }
        }

        if (irq == 7) {
                /*
                 * This may be a spurious interrupt.
                 *
                 * Read the interrupt status register (ISR). If the most
                 * significant bit is not set then there is no valid
                 * interrupt.
                 */
                if (!pci_intack)
                        outb(0x0B, 0x20);       /* ISR register */
                if (~inb(0x20) & 0x80)
                        irq = 0;
        } else if (irq == 0xff)
                irq = 0;

        if (lock)
                raw_spin_unlock(&i8259_lock);
        return irq;
}
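
/*
 * Mask the source in the cached OCW1 mask register, write it back to the
 * hardware and issue a non-specific EOI; a source on the slave also needs
 * an EOI sent to the master for the cascade line.
 */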
static void i8259_mask_and_ack_irq(struct irq_data *d)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&i8259_lock, flags);
        if (d->irq > 7) {
                cached_A1 |= 1 << (d->irq-8);
                inb(0xA1);              /* DUMMY */
                outb(cached_A1, 0xA1);
                outb(0x20, 0xA0);       /* Non-specific EOI */
                outb(0x20, 0x20);       /* Non-specific EOI to cascade */
        } else {
                cached_21 |= 1 << d->irq;
                inb(0x21);              /* DUMMY */
                outb(cached_21, 0x21);
                outb(0x20, 0x20);       /* Non-specific EOI */
        }
        raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
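
/* Flush both cached mask registers to the hardware; @irq_nr is unused. */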
static void i8259_set_irq_mask(int irq_nr)
{
        outb(cached_A1, 0xA1);
        outb(cached_21, 0x21);
}

static void i8259_mask_irq(struct irq_data *d)
{
        unsigned long flags;

        pr_debug("i8259_mask_irq(%d)\n", d->irq);

        raw_spin_lock_irqsave(&i8259_lock, flags);
        if (d->irq < 8)
                cached_21 |= 1 << d->irq;
        else
                cached_A1 |= 1 << (d->irq-8);
        i8259_set_irq_mask(d->irq);
        raw_spin_unlock_irqrestore(&i8259_lock, flags);
}

static void i8259_unmask_irq(struct irq_data *d)
{
        unsigned long flags;

        pr_debug("i8259_unmask_irq(%d)\n", d->irq);

        raw_spin_lock_irqsave(&i8259_lock, flags);
        if (d->irq < 8)
                cached_21 &= ~(1 << d->irq);
        else
                cached_A1 &= ~(1 << (d->irq-8));
        i8259_set_irq_mask(d->irq);
        raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
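
/*
 * Note on the flow handler: handle_level_irq() uses .irq_mask_ack on entry
 * and .irq_unmask on exit, so no separate .irq_ack callback is needed here.
 */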
static struct irq_chip i8259_pic = {
        .name = "i8259",
        .irq_mask = i8259_mask_irq,
        .irq_disable = i8259_mask_irq,
        .irq_unmask = i8259_unmask_irq,
        .irq_mask_ack = i8259_mask_and_ack_irq,
};

static struct resource pic1_iores = {
        .name = "8259 (master)",
        .start = 0x20,
        .end = 0x21,
        .flags = IORESOURCE_BUSY,
};

static struct resource pic2_iores = {
        .name = "8259 (slave)",
        .start = 0xa0,
        .end = 0xa1,
        .flags = IORESOURCE_BUSY,
};
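
/* Ports 0x4d0/0x4d1 are the ELCR (edge/level trigger control) registers. */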
static struct resource pic_edgectrl_iores = {
        .name = "8259 edge control",
        .start = 0x4d0,
        .end = 0x4d1,
        .flags = IORESOURCE_BUSY,
};

static int i8259_host_match(struct irq_domain *h, struct device_node *node,
                            enum irq_domain_bus_token bus_token)
{
        struct device_node *of_node = irq_domain_get_of_node(h);
        return of_node == NULL || of_node == node;
}

static int i8259_host_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

        /* We block the internal cascade */
        if (hw == 2)
                irq_set_status_flags(virq, IRQ_NOREQUEST);

        /*
         * Use the level handler for everything for now; we may want to be
         * more careful here eventually, but this works.
         */
        irq_set_status_flags(virq, IRQ_LEVEL);
        irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
        return 0;
}
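
/*
 * Translate a device-tree interrupt specifier: the first cell is the ISA
 * interrupt number; the optional second cell selects the sense using the
 * table below.
 */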
static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
                            const u32 *intspec, unsigned int intsize,
                            irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        static unsigned char map_isa_senses[4] = {
                IRQ_TYPE_LEVEL_LOW,
                IRQ_TYPE_LEVEL_HIGH,
                IRQ_TYPE_EDGE_FALLING,
                IRQ_TYPE_EDGE_RISING,
        };

        *out_hwirq = intspec[0];
        if (intsize > 1 && intspec[1] < 4)
                *out_flags = map_isa_senses[intspec[1]];
        else
                *out_flags = IRQ_TYPE_NONE;

        return 0;
}

static const struct irq_domain_ops i8259_host_ops = {
        .match = i8259_host_match,
        .map = i8259_host_map,
        .xlate = i8259_host_xlate,
};

struct irq_domain *i8259_get_host(void)
{
        return i8259_host;
}
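
/*
 * Typical use (hypothetical platform glue, not part of this file): the 8259
 * pair usually hangs off another interrupt controller, and the cascade
 * handler simply asks i8259_irq() which source fired.  Something along the
 * lines of (the function name is made up, and the chained-handler signature
 * assumes a recent kernel):
 *
 *	static void board_8259_cascade(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned int cascade_irq = i8259_irq();
 *
 *		if (cascade_irq)
 *			generic_handle_irq(cascade_irq);
 *		chip->irq_eoi(&desc->irq_data);
 *	}
 */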

/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then, it will match
 *        all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 *               the active irq from the 8259
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
        unsigned long flags;

        /* initialize the controller */
        raw_spin_lock_irqsave(&i8259_lock, flags);

        /* Mask all first */
        outb(0xff, 0xA1);
        outb(0xff, 0x21);

        /* init master interrupt controller */
        outb(0x11, 0x20); /* ICW1: edge triggered, cascade mode, ICW4 needed */
        outb(0x00, 0x21); /* ICW2: vector base */
        outb(0x04, 0x21); /* ICW3: slave (cascade) on IRQ2 */
        outb(0x01, 0x21); /* ICW4: select 8086 mode */

        /* init slave interrupt controller */
        outb(0x11, 0xA0); /* ICW1: edge triggered, cascade mode, ICW4 needed */
        outb(0x08, 0xA1); /* ICW2: vector base */
        outb(0x02, 0xA1); /* ICW3: slave ID, cascaded on the master's IRQ2 */
        outb(0x01, 0xA1); /* ICW4: select 8086 mode */

        /* That thing is slow */
        udelay(100);

        /* always read ISR */
        outb(0x0B, 0x20);
        outb(0x0B, 0xA0);

        /* Unmask the internal cascade */
        cached_21 &= ~(1 << 2);

        /* Set interrupt masks */
        outb(cached_A1, 0xA1);
        outb(cached_21, 0x21);

        raw_spin_unlock_irqrestore(&i8259_lock, flags);

        /* create a legacy host */
        i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
        if (i8259_host == NULL) {
                printk(KERN_ERR "i8259: failed to allocate irq host!\n");
                return;
        }

        /* reserve our resources */
        /* XXX should we continue doing that? it seems to cause problems
         * with further requesting of PCI IO resources for that range...
         * need to look into it.
         */
        request_resource(&ioport_resource, &pic1_iores);
        request_resource(&ioport_resource, &pic2_iores);
        request_resource(&ioport_resource, &pic_edgectrl_iores);

        if (intack_addr != 0)
                pci_intack = ioremap(intack_addr, 1);

        printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}
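
/*
 * Example init call (hypothetical board setup code, not part of this file):
 * the platform typically looks up the legacy PIC node and, where available,
 * the PCI interrupt acknowledge address, then calls:
 *
 *	i8259_init(pic_node, intack_addr);
 *
 * Passing intack_addr == 0 leaves pci_intack unset, so i8259_irq() falls
 * back to polling the controller directly.
 */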