/*
 * Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery)
 *
 * Author: Dale Farnsworth <dale@farnsworth.org>
 *
 * 2007 (c) MontaVista, Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/irq.h>

#include "mv64x60.h"

/* Interrupt Controller Interface Registers (offsets from the PIC base) */
#define MV64X60_IC_MAIN_CAUSE_LO	0x0004
#define MV64X60_IC_MAIN_CAUSE_HI	0x000c
#define MV64X60_IC_CPU0_INTR_MASK_LO	0x0014
#define MV64X60_IC_CPU0_INTR_MASK_HI	0x001c
#define MV64X60_IC_CPU0_SELECT_CAUSE	0x0024

/* Summary bits in the main-high cause word that report GPP interrupts */
#define MV64X60_HIGH_GPP_GROUPS		0x0f000000
/* Set in SELECT_CAUSE when the value shown is the main-high cause */
#define MV64X60_SELECT_CAUSE_HIGH	0x40000000

/* General Purpose Pins Controller Interface Registers (offsets from GPP base) */
#define MV64x60_GPP_INTR_CAUSE		0x0008
#define MV64x60_GPP_INTR_MASK		0x000c

/* Level-1 group selectors, encoded in bits 6:5 of a hwirq number */
#define MV64x60_LEVEL1_LOW		0
#define MV64x60_LEVEL1_HIGH		1
#define MV64x60_LEVEL1_GPP		2
#define MV64x60_LEVEL1_MASK		0x00000060
#define MV64x60_LEVEL1_OFFSET		5

/* Level-2 selector (bit within a group): low 5 bits of a hwirq number */
#define MV64x60_LEVEL2_MASK		0x0000001f

#define MV64x60_NUM_IRQS		96

/* Serializes updates of the cached masks and the hardware mask registers */
static DEFINE_SPINLOCK(mv64x60_lock);

static void __iomem *mv64x60_irq_reg_base;	/* mapped PIC registers */
static void __iomem *mv64x60_gpp_reg_base;	/* mapped GPP registers */

/*
 * Interrupt Controller Handling
 *
 * The interrupt controller handles three groups of interrupts:
 * main low: IRQ0-IRQ31
 * main high: IRQ32-IRQ63
 * gpp: IRQ64-IRQ95
 *
 * This code handles interrupts in two levels. Level 1 selects the
 * interrupt group, and level 2 selects an IRQ within that group.
 * Each group has its own irq_chip structure.
 */

/* Software copies of the three mask registers; a set bit means unmasked. */
static u32 mv64x60_cached_low_mask;
/* Starts with the GPP summary bits set so GPP activity shows in main-high. */
static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
static u32 mv64x60_cached_gpp_mask;

static struct irq_domain *mv64x60_irq_host;
  59. /*
  60. * mv64x60_chip_low functions
  61. */
  62. static void mv64x60_mask_low(struct irq_data *d)
  63. {
  64. int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
  65. unsigned long flags;
  66. spin_lock_irqsave(&mv64x60_lock, flags);
  67. mv64x60_cached_low_mask &= ~(1 << level2);
  68. out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
  69. mv64x60_cached_low_mask);
  70. spin_unlock_irqrestore(&mv64x60_lock, flags);
  71. (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
  72. }
  73. static void mv64x60_unmask_low(struct irq_data *d)
  74. {
  75. int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
  76. unsigned long flags;
  77. spin_lock_irqsave(&mv64x60_lock, flags);
  78. mv64x60_cached_low_mask |= 1 << level2;
  79. out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
  80. mv64x60_cached_low_mask);
  81. spin_unlock_irqrestore(&mv64x60_lock, flags);
  82. (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
  83. }
/*
 * irq_chip for the main-low group.  mask doubles as mask_ack; there is no
 * separate ack write for main cause bits in this driver.
 */
static struct irq_chip mv64x60_chip_low = {
	.name = "mv64x60_low",
	.irq_mask = mv64x60_mask_low,
	.irq_mask_ack = mv64x60_mask_low,
	.irq_unmask = mv64x60_unmask_low,
};
  90. /*
  91. * mv64x60_chip_high functions
  92. */
  93. static void mv64x60_mask_high(struct irq_data *d)
  94. {
  95. int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
  96. unsigned long flags;
  97. spin_lock_irqsave(&mv64x60_lock, flags);
  98. mv64x60_cached_high_mask &= ~(1 << level2);
  99. out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
  100. mv64x60_cached_high_mask);
  101. spin_unlock_irqrestore(&mv64x60_lock, flags);
  102. (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
  103. }
  104. static void mv64x60_unmask_high(struct irq_data *d)
  105. {
  106. int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
  107. unsigned long flags;
  108. spin_lock_irqsave(&mv64x60_lock, flags);
  109. mv64x60_cached_high_mask |= 1 << level2;
  110. out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
  111. mv64x60_cached_high_mask);
  112. spin_unlock_irqrestore(&mv64x60_lock, flags);
  113. (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
  114. }
/*
 * irq_chip for the main-high group.  As with the low group, mask doubles
 * as mask_ack.
 */
static struct irq_chip mv64x60_chip_high = {
	.name = "mv64x60_high",
	.irq_mask = mv64x60_mask_high,
	.irq_mask_ack = mv64x60_mask_high,
	.irq_unmask = mv64x60_unmask_high,
};
  121. /*
  122. * mv64x60_chip_gpp functions
  123. */
  124. static void mv64x60_mask_gpp(struct irq_data *d)
  125. {
  126. int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
  127. unsigned long flags;
  128. spin_lock_irqsave(&mv64x60_lock, flags);
  129. mv64x60_cached_gpp_mask &= ~(1 << level2);
  130. out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
  131. mv64x60_cached_gpp_mask);
  132. spin_unlock_irqrestore(&mv64x60_lock, flags);
  133. (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
  134. }
  135. static void mv64x60_mask_ack_gpp(struct irq_data *d)
  136. {
  137. int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
  138. unsigned long flags;
  139. spin_lock_irqsave(&mv64x60_lock, flags);
  140. mv64x60_cached_gpp_mask &= ~(1 << level2);
  141. out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
  142. mv64x60_cached_gpp_mask);
  143. out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE,
  144. ~(1 << level2));
  145. spin_unlock_irqrestore(&mv64x60_lock, flags);
  146. (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
  147. }
  148. static void mv64x60_unmask_gpp(struct irq_data *d)
  149. {
  150. int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
  151. unsigned long flags;
  152. spin_lock_irqsave(&mv64x60_lock, flags);
  153. mv64x60_cached_gpp_mask |= 1 << level2;
  154. out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
  155. mv64x60_cached_gpp_mask);
  156. spin_unlock_irqrestore(&mv64x60_lock, flags);
  157. (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
  158. }
/*
 * irq_chip for the GPP group.  GPP interrupts latch a cause bit, so
 * mask_ack uses the dedicated mask-and-clear routine.
 */
static struct irq_chip mv64x60_chip_gpp = {
	.name = "mv64x60_gpp",
	.irq_mask = mv64x60_mask_gpp,
	.irq_mask_ack = mv64x60_mask_ack_gpp,
	.irq_unmask = mv64x60_unmask_gpp,
};
/*
 * mv64x60_host_ops functions
 */

/* Level-1 group index -> irq_chip handling that group. */
static struct irq_chip *mv64x60_chips[] = {
	[MV64x60_LEVEL1_LOW] = &mv64x60_chip_low,
	[MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high,
	[MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp,
};
  173. static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
  174. irq_hw_number_t hwirq)
  175. {
  176. int level1;
  177. irq_set_status_flags(virq, IRQ_LEVEL);
  178. level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
  179. BUG_ON(level1 > MV64x60_LEVEL1_GPP);
  180. irq_set_chip_and_handler(virq, mv64x60_chips[level1],
  181. handle_level_irq);
  182. return 0;
  183. }
/* irq_domain callbacks: only .map is needed for this linear domain. */
static const struct irq_domain_ops mv64x60_host_ops = {
	.map = mv64x60_host_map,
};
/*
 * Global functions
 */

/*
 * mv64x60_init_irq - map the GPP and PIC register blocks described in
 * the device tree, register the 96-entry linear irq domain, then mask
 * every source and clear any latched causes.
 *
 * NOTE(review): assumes both device-tree nodes and their "reg"
 * properties exist and that ioremap succeeds; a missing node would
 * oops here rather than fail gracefully (legacy behavior).
 */
void __init mv64x60_init_irq(void)
{
	struct device_node *np;
	phys_addr_t paddr;
	unsigned int size;
	const unsigned int *reg;
	unsigned long flags;

	/* Map the GPP (general purpose pins) register block. */
	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
	of_node_put(np);

	/*
	 * Map the main PIC register block.  NOTE(review): no of_node_put
	 * for this node — presumably because the irq domain keeps the
	 * reference; confirm before "fixing".
	 */
	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_irq_reg_base = ioremap(paddr, reg[1]);

	mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
						 &mv64x60_host_ops, NULL);

	/* Start with everything masked and all latched causes cleared. */
	spin_lock_irqsave(&mv64x60_lock, flags);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
}
  220. unsigned int mv64x60_get_irq(void)
  221. {
  222. u32 cause;
  223. int level1;
  224. irq_hw_number_t hwirq;
  225. int virq = 0;
  226. cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
  227. if (cause & MV64X60_SELECT_CAUSE_HIGH) {
  228. cause &= mv64x60_cached_high_mask;
  229. level1 = MV64x60_LEVEL1_HIGH;
  230. if (cause & MV64X60_HIGH_GPP_GROUPS) {
  231. cause = in_le32(mv64x60_gpp_reg_base +
  232. MV64x60_GPP_INTR_CAUSE);
  233. cause &= mv64x60_cached_gpp_mask;
  234. level1 = MV64x60_LEVEL1_GPP;
  235. }
  236. } else {
  237. cause &= mv64x60_cached_low_mask;
  238. level1 = MV64x60_LEVEL1_LOW;
  239. }
  240. if (cause) {
  241. hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause);
  242. virq = irq_linear_revmap(mv64x60_irq_host, hwirq);
  243. }
  244. return virq;
  245. }