/* msp_irq_cic.c */
  1. /*
  2. * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
  3. *
  4. * This file define the irq handler for MSP CIC subsystem interrupts.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/kernel.h>
  14. #include <linux/bitops.h>
  15. #include <linux/irq.h>
  16. #include <asm/mipsregs.h>
  17. #include <msp_cic_int.h>
  18. #include <msp_regs.h>
  19. /*
  20. * External API
  21. */
  22. extern void msp_per_irq_init(void);
  23. extern void msp_per_irq_dispatch(void);
  24. /*
  25. * Convenience Macro. Should be somewhere generic.
  26. */
  27. #define get_current_vpe() \
  28. ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
  29. #ifdef CONFIG_SMP
  30. #define LOCK_VPE(flags, mtflags) \
  31. do { \
  32. local_irq_save(flags); \
  33. mtflags = dmt(); \
  34. } while (0)
  35. #define UNLOCK_VPE(flags, mtflags) \
  36. do { \
  37. emt(mtflags); \
  38. local_irq_restore(flags);\
  39. } while (0)
  40. #define LOCK_CORE(flags, mtflags) \
  41. do { \
  42. local_irq_save(flags); \
  43. mtflags = dvpe(); \
  44. } while (0)
  45. #define UNLOCK_CORE(flags, mtflags) \
  46. do { \
  47. evpe(mtflags); \
  48. local_irq_restore(flags);\
  49. } while (0)
  50. #else
  51. #define LOCK_VPE(flags, mtflags)
  52. #define UNLOCK_VPE(flags, mtflags)
  53. #endif
/* ensure writes to cic are completed */
static inline void cic_wmb(void)
{
	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
	volatile u32 dummy_read;

	/* barrier first, then read back a CIC register to flush posted writes */
	wmb();
	dummy_read = __raw_readl(cic_mem);
	dummy_read++;	/* consume the value; silences "set but not used" */
}
/*
 * irq_chip unmask handler: set this IRQ's bit in the current VPE's
 * CIC mask register so the line can raise interrupts again.
 */
static void unmask_cic_irq(struct irq_data *d)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long flags;

	/*
	 * Make sure we have IRQ affinity.  It may have changed while
	 * we were processing the IRQ.
	 */
	if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
		return;
#endif
	/* per-VPE mask registers are laid out as an array at CIC_VPE0_MSK_REG */
	vpe = get_current_vpe();
	/* LOCK_VPE: make the |= read-modify-write atomic w.r.t. other TCs */
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	/* read back so the unmask has actually reached the CIC */
	cic_wmb();
}
/*
 * irq_chip mask handler: clear this IRQ's bit in the current VPE's
 * CIC mask register.
 */
static void mask_cic_irq(struct irq_data *d)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe = get_current_vpe();
#ifdef CONFIG_SMP
	/* NOTE(review): mtflags is unsigned int in unmask_cic_irq — confirm intended */
	unsigned long flags, mtflags;
#endif
	/* LOCK_VPE: make the &= read-modify-write atomic w.r.t. other TCs */
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	/* read back so the mask has actually reached the CIC */
	cic_wmb();
}
/*
 * irq_chip ack handler: mask the source first, then write-1-to-clear
 * its bit in the CIC status register.
 */
static void msp_cic_irq_ack(struct irq_data *d)
{
	mask_cic_irq(d);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
}
/* Note: Limiting to VSMP. */
#ifdef CONFIG_MIPS_MT_SMP
/*
 * irq_chip set_affinity handler: route this CIC interrupt to the
 * requested CPUs by setting/clearing its bit in each VPE's mask
 * register while the whole core is quiesced.
 *
 * NOTE(review): cic_mask is indexed by cpu id, which assumes cpu id
 * == VPE id for VSMP — confirm against the platform's CPU numbering.
 */
static int msp_cic_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	int cpu;
	unsigned long flags;
	unsigned int mtflags;
	unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);

	/* LOCK_CORE: dvpe() stops all VPEs while every mask register is rewritten */
	LOCK_CORE(flags, mtflags);
	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			cic_mask[cpu] |= imask;
		else
			cic_mask[cpu] &= ~imask;
	}
	UNLOCK_CORE(flags, mtflags);
	return 0;
}
#endif
/*
 * irq_chip for the 32 CIC interrupt lines.  Both .irq_ack and
 * .irq_mask_ack use msp_cic_irq_ack, which masks before clearing
 * the latched status bit.
 */
static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.irq_mask = mask_cic_irq,
	.irq_mask_ack = msp_cic_irq_ack,
	.irq_unmask = unmask_cic_irq,
	.irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
	.irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};
  139. void __init msp_cic_irq_init(void)
  140. {
  141. int i;
  142. /* Mask/clear interrupts. */
  143. *CIC_VPE0_MSK_REG = 0x00000000;
  144. *CIC_VPE1_MSK_REG = 0x00000000;
  145. *CIC_STS_REG = 0xFFFFFFFF;
  146. /*
  147. * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
  148. * These inputs map to EXT_INT_POL[6:4] inside the CIC.
  149. * They are to be active low, level sensitive.
  150. */
  151. *CIC_EXT_CFG_REG &= 0xFFFF8F8F;
  152. /* initialize all the IRQ descriptors */
  153. for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
  154. irq_set_chip_and_handler(i, &msp_cic_irq_controller,
  155. handle_level_irq);
  156. }
  157. /* Initialize the PER interrupt sub-system */
  158. msp_per_irq_init();
  159. }
  160. /* CIC masked by CIC vector processing before dispatch called */
  161. void msp_cic_irq_dispatch(void)
  162. {
  163. volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
  164. u32 cic_mask;
  165. u32 pending;
  166. int cic_status = *CIC_STS_REG;
  167. cic_mask = cic_msk_reg[get_current_vpe()];
  168. pending = cic_status & cic_mask;
  169. if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
  170. do_IRQ(MSP_INT_VPE0_TIMER);
  171. } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
  172. do_IRQ(MSP_INT_VPE1_TIMER);
  173. } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
  174. msp_per_irq_dispatch();
  175. } else if (pending) {
  176. do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
  177. } else{
  178. spurious_interrupt();
  179. }
  180. }