irq.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>
/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1
/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized to hold a single sentinel interrupt that the
 * kernel doesn't actually use.  During kernel init, interrupts are
 * added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);
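
/*
 * Illustrative sketch, not part of this file: arch_local_irq_enable()
 * (defined in the arch's irqflags handling) is assumed to unmask
 * exactly the interrupts recorded in this per-cpu mask, roughly:
 *
 *	#define arch_local_irq_enable() \
 *		interrupt_mask_reset_mask( \
 *			this_cpu_read(interrupts_enabled_mask))
 */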

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
	____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
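
/*
 * Illustrative note, not part of the original file: each of these
 * macros takes a bitmask with one bit per IRQ number, so a single
 * interrupt "irq" is manipulated by shifting a 1 into place, as the
 * irq_chip callbacks below do:
 *
 *	mask_irqs(1UL << irq);		(block delivery of "irq")
 *	clear_irqs(1UL << irq);		(drop a pending assertion)
 *	unmask_irqs(1UL << irq);	(allow delivery again)
 */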

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __this_cpu_inc_return(irq_depth);
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("%s: stack overflow: %ld\n",
				 __func__, sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__this_cpu_inc(irq_stat.irq_dev_intr_count);

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 1)
		unmask_irqs(~__this_cpu_read(irq_disable_mask));

	__this_cpu_dec(irq_depth);

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
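
/*
 * Illustrative worked example, not part of the original file: the
 * dispatch loop above peels pending bits off lowest-first.  If
 * remaining_irqs started as 0x14 (bits 2 and 4 set), __ffs() would
 * return 2, generic_handle_irq(2) would run, clearing that bit would
 * leave 0x10, and the next pass would handle irq 4.
 */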

/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
	if (__this_cpu_read(irq_depth) == 0)
		unmask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
	mask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}
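
/*
 * Illustrative note, not part of the original file: these two
 * callbacks are what the generic IRQ layer is assumed to reach when a
 * driver uses the standard enable/disable API, e.g.:
 *
 *	disable_irq(irq);	(ends up in tile_irq_chip_disable)
 *	...critical section...
 *	enable_irq(irq);	(ends up in tile_irq_chip_enable)
 */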

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
	mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
	unmask_irqs(1UL << d->irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
	if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
		clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
	if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
		unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
	.name = "tile_irq_chip",
	.irq_enable = tile_irq_chip_enable,
	.irq_disable = tile_irq_chip_disable,
	.irq_ack = tile_irq_chip_ack,
	.irq_eoi = tile_irq_chip_eoi,
	.irq_mask = tile_irq_chip_mask,
	.irq_unmask = tile_irq_chip_unmask,
};
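
/*
 * Illustrative note, not part of the original file: with this chip,
 * the generic flow handlers are assumed to drive the callbacks roughly
 * as follows.  handle_level_irq():
 *
 *	irq_mask() + irq_ack()  ->  handler runs  ->  irq_unmask()
 *
 * handle_percpu_irq():
 *
 *	irq_ack()  ->  handler runs  ->  irq_eoi()
 *
 * which is why irq_eoi() above rechecks irq_disable_mask before
 * unmasking.
 */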

void __init init_IRQ(void)
{
	ipi_init();
}

void setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	arch_local_irq_unmask(INT_IPI_K);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
	/*
	 * We use handle_level_irq() by default because the pending
	 * interrupt vector (whether modeled by the HV on TILEPro or
	 * implemented in hardware on TILE-Gx) has level-style
	 * semantics for each bit.  An interrupt fires whenever a bit
	 * is high, not just at edges.
	 */
	irq_flow_handler_t handle = handle_level_irq;
	if (tile_irq_type == TILE_IRQ_PERCPU)
		handle = handle_percpu_irq;
	irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

	/*
	 * Flag interrupts that are hardware-cleared so that ack()
	 * won't clear them.
	 */
	if (tile_irq_type == TILE_IRQ_HW_CLEAR)
		irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
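
/*
 * Illustrative usage sketch, not part of the original file: a device
 * driver would typically activate its IRQ here and then register a
 * handler through the generic IRQ layer.  MY_DEV_IRQ, my_dev_handler,
 * and dev are hypothetical names:
 *
 *	tile_irq_activate(MY_DEV_IRQ, TILE_IRQ_HW_CLEAR);
 *	if (request_irq(MY_DEV_IRQ, my_dev_handler, 0, "my_dev", dev))
 *		pr_err("my_dev: could not request irq %d\n", MY_DEV_IRQ);
 */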

void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_PERF_EVENTS
	int i;

	seq_printf(p, "%*s: ", prec, "PMI");

	for_each_online_cpu(i)
		seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
	seq_puts(p, "  perf_events\n");
#endif
	return 0;
}
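
/*
 * Illustrative output, not part of the original file: on a
 * hypothetical two-cpu system, the line this adds to /proc/interrupts
 * would look roughly like:
 *
 *	PMI:      12345      67890   perf_events
 */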

#if CHIP_HAS_IPI()
int arch_setup_hwirq(unsigned int irq, int node)
{
	return irq >= NR_IRQS ? -EINVAL : 0;
}

void arch_teardown_hwirq(unsigned int irq) { }
#endif