  1. /*
  2. * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
  3. *
  4. * This file define the irq handler for MSP PER subsystem interrupts.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/kernel.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/bitops.h>
  16. #include <asm/mipsregs.h>
  17. #include <msp_cic_int.h>
  18. #include <msp_regs.h>
/*
 * Convenience Macro. Should be somewhere generic.
 *
 * Extracts the current VPE number from the CP0 TCBind register
 * (TCBIND_CURVPE field).
 * NOTE(review): not referenced anywhere in this file as shown --
 * possibly used by other MSP irq code or left over; confirm before
 * removing.
 */
#define get_current_vpe() \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
#ifdef CONFIG_SMP
/*
 * The PER registers must be protected from concurrent access.
 * The mask register is updated with read-modify-write sequences
 * (|= / &= below), so this lock serializes them across CPUs.
 */
static DEFINE_SPINLOCK(per_lock);
#endif
  30. /* ensure writes to per are completed */
  31. static inline void per_wmb(void)
  32. {
  33. const volatile void __iomem *per_mem = PER_INT_MSK_REG;
  34. volatile u32 dummy_read;
  35. wmb();
  36. dummy_read = __raw_readl(per_mem);
  37. dummy_read++;
  38. }
  39. static inline void unmask_per_irq(struct irq_data *d)
  40. {
  41. #ifdef CONFIG_SMP
  42. unsigned long flags;
  43. spin_lock_irqsave(&per_lock, flags);
  44. *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
  45. spin_unlock_irqrestore(&per_lock, flags);
  46. #else
  47. *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
  48. #endif
  49. per_wmb();
  50. }
  51. static inline void mask_per_irq(struct irq_data *d)
  52. {
  53. #ifdef CONFIG_SMP
  54. unsigned long flags;
  55. spin_lock_irqsave(&per_lock, flags);
  56. *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
  57. spin_unlock_irqrestore(&per_lock, flags);
  58. #else
  59. *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
  60. #endif
  61. per_wmb();
  62. }
  63. static inline void msp_per_irq_ack(struct irq_data *d)
  64. {
  65. mask_per_irq(d);
  66. /*
  67. * In the PER interrupt controller, only bits 11 and 10
  68. * are write-to-clear, (SPI TX complete, SPI RX complete).
  69. * It does nothing for any others.
  70. */
  71. *PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
  72. }
  73. #ifdef CONFIG_SMP
  74. static int msp_per_irq_set_affinity(struct irq_data *d,
  75. const struct cpumask *affinity, bool force)
  76. {
  77. /* WTF is this doing ????? */
  78. unmask_per_irq(d);
  79. return 0;
  80. }
  81. #endif
/*
 * irq_chip for the PER interrupt lines: unmask/mask implement
 * enable/disable, and ack masks the source and writes the
 * (write-to-clear) status bit.
 */
static struct irq_chip msp_per_irq_controller = {
	.name = "MSP_PER",
	.irq_enable = unmask_per_irq,
	.irq_disable = mask_per_irq,
	.irq_ack = msp_per_irq_ack,
#ifdef CONFIG_SMP
	.irq_set_affinity = msp_per_irq_set_affinity,
#endif
};
  91. void __init msp_per_irq_init(void)
  92. {
  93. int i;
  94. /* Mask/clear interrupts. */
  95. *PER_INT_MSK_REG = 0x00000000;
  96. *PER_INT_STS_REG = 0xFFFFFFFF;
  97. /* initialize all the IRQ descriptors */
  98. for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
  99. irq_set_chip(i, &msp_per_irq_controller);
  100. }
  101. }
  102. void msp_per_irq_dispatch(void)
  103. {
  104. u32 per_mask = *PER_INT_MSK_REG;
  105. u32 per_status = *PER_INT_STS_REG;
  106. u32 pending;
  107. pending = per_status & per_mask;
  108. if (pending) {
  109. do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
  110. } else {
  111. spurious_interrupt();
  112. }
  113. }