cevt-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>
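
/*
 * Program a Count/Compare event 'delta' cycles from now.  Returns -ETIME
 * if Count has already reached or passed the new Compare value by the
 * time it is re-read, so the clockevents core can retry with a larger
 * delta.
 */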
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
	return res;
}
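
/*
 * The CP0 Count register free-runs, so there is no hardware state to
 * change when the clockevent mode is switched; this callback is a no-op.
 */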
void mips_set_clock_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	/* Nothing to do ... */
}
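
/*
 * One clock_event_device per CPU; cp0_timer_irq_installed ensures the
 * shared CP0 timer irqaction is only set up once.
 */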
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}
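
/*
 * Interrupt handler for the CP0 Count/Compare timer.  On R2+ cores the
 * Cause.TI bit tells us whether the timer actually fired; on older cores
 * a timer interrupt is assumed once handle_perf_irq() has not claimed it.
 */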
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2_r6;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		return IRQ_HANDLED;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are, we check anyway.
	 */
	if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	/*
	 * IRQF_SHARED: The timer interrupt may be shared with other interrupts
	 * such as perf counter and FDC interrupts.
	 */
	.flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
	.name = "timer",
};
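
/*
 * Placeholder event handler; the clockevents core installs the real
 * handler once the device is registered.
 */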
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	/* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * The compare interrupt can be routed and latched outside the core, so
 * wait up to the worst-case number of cycle counter ticks for timer
 * interrupt changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS	50
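
/*
 * Probe whether the Count/Compare interrupt actually reaches the CPU:
 * clear any stale pending interrupt, arm Compare a short distance in the
 * future, wait for it to fire, then check that acking Compare clears it.
 */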
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;
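
	/*
	 * When running as a KVM guest, assume the (virtualised) timer is
	 * usable and skip the hardware probe below.
	 */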
#ifdef CONFIG_KVM_GUEST
	return 1;
#endif

	/*
	 * IP7 already pending? Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		cnt = read_c0_count();
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
			if (!c0_compare_int_pending())
				break;
		if (c0_compare_int_pending())
			return 0;
	}
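
	/*
	 * Arm Compare progressively further into the future until the
	 * deadline is still ahead of Count after the write takes effect.
	 */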
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */

	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (c0_compare_int_pending())
			break;
	if (!c0_compare_int_pending())
		return 0;
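
	/* The interrupt fired; now check that acking Compare clears it. */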
	cnt = read_c0_count();
	write_c0_compare(cnt);
	back_to_back_c0_hazard();
	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (!c0_compare_int_pending())
			break;
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}
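
/*
 * Register the per-CPU Count/Compare clockevent device and, on the first
 * CPU to get here, install the shared timer irqaction.
 */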
int r4k_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT |
		       CLOCK_EVT_FEAT_C3STOP |
		       CLOCK_EVT_FEAT_PERCPU;

	clockevent_set_clock(cd, mips_hpt_frequency);

	/* Calculate the min / max delta */
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);

	cd->rating = 300;
	cd->irq = irq;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = mips_next_event;
	cd->set_mode = mips_set_clock_mode;
	cd->event_handler = mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}