/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file defines the irq handler for MSP CIC subsystem interrupts.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/mipsregs.h>
#include <msp_cic_int.h>
#include <msp_regs.h>

/*
 * External API -- second-level dispatch for the PER sub-block,
 * implemented in msp_irq_per.c.
 */
extern void msp_per_irq_init(void);
extern void msp_per_irq_dispatch(void);

/*
 * Convenience Macro. Should be somewhere generic.
 * Extracts the VPE number this code is executing on from CP0 TCBind.
 */
#define get_current_vpe() \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

#ifdef CONFIG_SMP

/*
 * LOCK_VPE/UNLOCK_VPE: disable local interrupts and use dmt()/emt()
 * (MIPS MT disable/enable multithreading) so no other TC on this VPE
 * runs while the per-VPE CIC mask register is read-modify-written.
 */
#define LOCK_VPE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dmt(); \
} while (0)

#define UNLOCK_VPE(flags, mtflags) \
do { \
	emt(mtflags); \
	local_irq_restore(flags);\
} while (0)

/*
 * LOCK_CORE/UNLOCK_CORE: as above but dvpe()/evpe() stop every VPE on
 * the core -- needed when both VPEs' mask registers are updated
 * (see msp_cic_irq_set_affinity()).
 */
#define LOCK_CORE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dvpe(); \
} while (0)

#define UNLOCK_CORE(flags, mtflags) \
do { \
	evpe(mtflags); \
	local_irq_restore(flags);\
} while (0)

#else
/*
 * UP build: no other TC/VPE can race us, so the lock pairs compile to
 * nothing.  NOTE(review): LOCK_CORE/UNLOCK_CORE have no stubs here;
 * their only user is guarded by CONFIG_MIPS_MT_SMP, which presumably
 * implies CONFIG_SMP -- confirm against Kconfig.
 */
#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)
#endif
  54. /* ensure writes to cic are completed */
  55. static inline void cic_wmb(void)
  56. {
  57. const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
  58. volatile u32 dummy_read;
  59. wmb();
  60. dummy_read = __raw_readl(cic_mem);
  61. dummy_read++;
  62. }
/*
 * Enable a CIC interrupt on the VPE we are currently running on by
 * setting its bit in this VPE's CIC mask register.
 */
static void unmask_cic_irq(struct irq_data *d)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long flags;

	/*
	 * Make sure we have IRQ affinity.  It may have changed while
	 * we were processing the IRQ.  Bail out rather than unmask on
	 * a VPE the IRQ is no longer bound to.
	 */
	if (!cpumask_test_cpu(smp_processor_id(),
			      irq_data_get_affinity_mask(d)))
		return;
#endif
	vpe = get_current_vpe();
	LOCK_VPE(flags, mtflags);
	/* cic_msk_reg[] is indexed per VPE; set this IRQ's mask bit */
	cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();	/* flush the posted mask write before returning */
}
  84. static void mask_cic_irq(struct irq_data *d)
  85. {
  86. volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
  87. int vpe = get_current_vpe();
  88. #ifdef CONFIG_SMP
  89. unsigned long flags, mtflags;
  90. #endif
  91. LOCK_VPE(flags, mtflags);
  92. cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
  93. UNLOCK_VPE(flags, mtflags);
  94. cic_wmb();
  95. }
/*
 * Mask-and-acknowledge a CIC interrupt: mask it on this VPE, then
 * write its bit to the CIC status register (presumably
 * write-1-to-clear, matching the 0xFFFFFFFF clear in
 * msp_cic_irq_init() -- confirm against CIC register spec).
 */
static void msp_cic_irq_ack(struct irq_data *d)
{
	mask_cic_irq(d);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
}
/* Note: Limiting to VSMP. */
#ifdef CONFIG_MIPS_MT_SMP
/*
 * Steer a CIC interrupt to the CPUs in @cpumask by setting/clearing
 * its bit in each online CPU's per-VPE mask register.  Indexes
 * cic_mask[] by logical cpu number -- assumes cpu == VPE index on
 * this platform; TODO confirm.  Always reports success.
 */
static int msp_cic_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	int cpu;
	unsigned long flags;
	unsigned int mtflags;
	unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);

	/* whole-core lock: both VPEs' mask registers are touched */
	LOCK_CORE(flags, mtflags);
	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			cic_mask[cpu] |= imask;
		else
			cic_mask[cpu] &= ~imask;
	}
	UNLOCK_CORE(flags, mtflags);
	return 0;
}
#endif
/*
 * irq_chip callbacks for the CIC interrupt lines.  The same routine
 * serves both .irq_ack and .irq_mask_ack since it masks before acking.
 */
static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.irq_mask = mask_cic_irq,
	.irq_mask_ack = msp_cic_irq_ack,
	.irq_unmask = unmask_cic_irq,
	.irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
	.irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};
  140. void __init msp_cic_irq_init(void)
  141. {
  142. int i;
  143. /* Mask/clear interrupts. */
  144. *CIC_VPE0_MSK_REG = 0x00000000;
  145. *CIC_VPE1_MSK_REG = 0x00000000;
  146. *CIC_STS_REG = 0xFFFFFFFF;
  147. /*
  148. * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
  149. * These inputs map to EXT_INT_POL[6:4] inside the CIC.
  150. * They are to be active low, level sensitive.
  151. */
  152. *CIC_EXT_CFG_REG &= 0xFFFF8F8F;
  153. /* initialize all the IRQ descriptors */
  154. for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
  155. irq_set_chip_and_handler(i, &msp_cic_irq_controller,
  156. handle_level_irq);
  157. }
  158. /* Initialize the PER interrupt sub-system */
  159. msp_per_irq_init();
  160. }
/* CIC masked by CIC vector processing before dispatch called */
void msp_cic_irq_dispatch(void)
{
	volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
	u32 cic_mask;
	u32 pending;
	int cic_status = *CIC_STS_REG;

	/* only consider sources unmasked on the VPE we are running on */
	cic_mask = cic_msk_reg[get_current_vpe()];
	pending = cic_status & cic_mask;

	/*
	 * Fixed dispatch priority: VPE0 timer, VPE1 timer, PER block
	 * (which has its own second-level dispatcher), then the
	 * lowest-numbered remaining source.
	 */
	if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE0_TIMER);
	} else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE1_TIMER);
	} else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
		msp_per_irq_dispatch();
	} else if (pending) {
		/* ffs() is 1-based, hence the -1 */
		do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
	} else {
		spurious_interrupt();
	}
}