time.c 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383
  1. /*
  2. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. *
  8. * vineetg: Jan 2011
  9. * -sched_clock( ) no longer jiffies based. Uses the same clocksource
  10. * as gtod
  11. *
  12. * Rajeshwarr/Vineetg: Mar 2008
  13. * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
  14. * for arch independent gettimeofday()
  15. * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
  16. *
  17. * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
  18. */
  19. /* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
  20. * Each can be programmed to go from @count to @limit and optionally
  21. * interrupt when that happens.
  22. * A write to Control Register clears the Interrupt
  23. *
  24. * We've designated TIMER0 for events (clockevents)
  25. * while TIMER1 for free running (clocksource)
  26. *
  27. * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
  28. * which however is currently broken
  29. */
  30. #include <linux/interrupt.h>
  31. #include <linux/clk.h>
  32. #include <linux/clk-provider.h>
  33. #include <linux/clocksource.h>
  34. #include <linux/clockchips.h>
  35. #include <linux/cpu.h>
  36. #include <linux/of.h>
  37. #include <linux/of_irq.h>
  38. #include <asm/irq.h>
  39. #include <asm/arcregs.h>
  40. #include <asm/mcip.h>
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */

/* Bits in the TIMERn control registers */
#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */

/* Timers are 32-bit: largest programmable limit / count value */
#define ARC_TIMER_MAX 0xFFFFFFFF

/* Timer input clock rate in Hz; filled in from DT by arc_get_timer_clk() */
static unsigned long arc_timer_freq;
  52. static int noinline arc_get_timer_clk(struct device_node *node)
  53. {
  54. struct clk *clk;
  55. int ret;
  56. clk = of_clk_get(node, 0);
  57. if (IS_ERR(clk)) {
  58. pr_err("timer missing clk");
  59. return PTR_ERR(clk);
  60. }
  61. ret = clk_prepare_enable(clk);
  62. if (ret) {
  63. pr_err("Couldn't enable parent clk\n");
  64. return ret;
  65. }
  66. arc_timer_freq = clk_get_rate(clk);
  67. return 0;
  68. }
  69. /********** Clock Source Device *********/
  70. #ifdef CONFIG_ARC_HAS_GFRC
/*
 * Read the 64-bit ARConnect Global Free Running Counter.
 * Each 32-bit half is latched by an MCIP command and then fetched from
 * the MCIP READBACK aux register; the union glues the halves back into
 * a single cycle_t in the right byte order.
 */
static cycle_t arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 h, l; };
#else
		struct { u32 l, h; };
#endif
		cycle_t full;
	} stamp;

	/*
	 * The LO and HI readbacks are two separate cmd/readback pairs;
	 * keep interrupts off so nothing can run between them and hand
	 * us a torn 64-bit value.
	 */
	local_irq_save(flags);

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return stamp.full;
}
/* 64-bit SMP-safe counter, hence the best (highest) rating of the three */
static struct clocksource arc_counter_gfrc = {
	.name = "ARConnect GFRC",
	.rating = 400,
	.read = arc_read_gfrc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
  97. static int __init arc_cs_setup_gfrc(struct device_node *node)
  98. {
  99. int exists = cpuinfo_arc700[0].extn.gfrc;
  100. int ret;
  101. if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
  102. return -ENXIO;
  103. ret = arc_get_timer_clk(node);
  104. if (ret)
  105. return ret;
  106. return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
  107. }
  108. CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
  109. #endif
  110. #ifdef CONFIG_ARC_HAS_RTC
/* ARCv2 RTC aux registers */
#define AUX_RTC_CTRL 0x103 /* control/status; setup writes 1 to start counting */
#define AUX_RTC_LOW 0x104 /* counter bits 31:0 */
#define AUX_RTC_HIGH 0x105 /* counter bits 63:32 */

/*
 * Read the local 64-bit RTC counter, retrying until the hardware
 * confirms the low/high pair was sampled atomically.
 */
static cycle_t arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 high, low; };
#else
		struct { u32 low, high; };
#endif
		cycle_t full;
	} stamp;

	/*
	 * hardware has an internal state machine which tracks readout of
	 * low/high and updates the CTRL.status if
	 * - interrupt/exception taken between the two reads
	 * - high increments after low has been read
	 */
	do {
		stamp.low = read_aux_reg(AUX_RTC_LOW);
		stamp.high = read_aux_reg(AUX_RTC_HIGH);
		status = read_aux_reg(AUX_RTC_CTRL);
	} while (!(status & _BITUL(31)));	/* bit 31 set => pair read was consistent */

	return stamp.full;
}
/* 64-bit but CPU-local counter: rated below GFRC, above 32-bit TIMER1 */
static struct clocksource arc_counter_rtc = {
	.name = "ARCv2 RTC",
	.rating = 350,
	.read = arc_read_rtc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
  145. static int __init arc_cs_setup_rtc(struct device_node *node)
  146. {
  147. int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
  148. int ret;
  149. if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
  150. return -ENXIO;
  151. /* Local to CPU hence not usable in SMP */
  152. if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
  153. return -EINVAL;
  154. ret = arc_get_timer_clk(node);
  155. if (ret)
  156. return ret;
  157. write_aux_reg(AUX_RTC_CTRL, 1);
  158. return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
  159. }
  160. CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
  161. #endif
  162. /*
  163. * 32bit TIMER1 to keep counting monotonically and wraparound
  164. */
  165. static cycle_t arc_read_timer1(struct clocksource *cs)
  166. {
  167. return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
  168. }
/* Plain 32-bit counter: lowest rating, fallback when GFRC/RTC are absent */
static struct clocksource arc_counter_timer1 = {
	.name = "ARC Timer1",
	.rating = 300,
	.read = arc_read_timer1,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
  176. static int __init arc_cs_setup_timer1(struct device_node *node)
  177. {
  178. int ret;
  179. /* Local to CPU hence not usable in SMP */
  180. if (IS_ENABLED(CONFIG_SMP))
  181. return -EINVAL;
  182. ret = arc_get_timer_clk(node);
  183. if (ret)
  184. return ret;
  185. write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
  186. write_aux_reg(ARC_REG_TIMER1_CNT, 0);
  187. write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
  188. return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
  189. }
  190. /********** Clock Event Device *********/
/* IRQ line (parsed from DT) shared by the per-cpu TIMER0 clockevents */
static int arc_timer_irq;

/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in timer_irq_handler() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
	/* IE: interrupt on reaching limit; NH: count only while not halted */
	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}
/* clockevent callback: program TIMER0 to fire once, @delta cycles from now */
static int arc_clkevent_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}
  208. static int arc_clkevent_set_periodic(struct clock_event_device *dev)
  209. {
  210. /*
  211. * At X Hz, 1 sec = 1000ms -> X cycles;
  212. * 10ms -> X / 100 cycles
  213. */
  214. arc_timer_event_setup(arc_timer_freq / HZ);
  215. return 0;
  216. }
/* Each core drives its own private TIMER0, hence a per-cpu clockevent */
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name = "ARC Timer0",
	.features = CLOCK_EVT_FEAT_ONESHOT |
		    CLOCK_EVT_FEAT_PERIODIC,
	.rating = 300,
	.set_next_event = arc_clkevent_set_next_event,
	.set_state_periodic = arc_clkevent_set_periodic,
};
/* TIMER0 tick handler: ACK the interrupt, then run the clockevent hook */
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * Any write to CTRL reg ACKs the interrupt. We keep the [N]ot
	 * [H]alted bit set, and re-arm via the [I]nterrupt [E]nable bit
	 * only if periodic: irq_reenable is 0/1, and 1 happens to be
	 * exactly TIMER_CTRL_IE (bit 0).
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
  242. static int arc_timer_starting_cpu(unsigned int cpu)
  243. {
  244. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  245. evt->cpumask = cpumask_of(smp_processor_id());
  246. clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
  247. enable_percpu_irq(arc_timer_irq, 0);
  248. return 0;
  249. }
/* cpuhp teardown pair of the above: mask the timer IRQ on this core */
static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}
  255. /*
  256. * clockevent setup for boot CPU
  257. */
  258. static int __init arc_clockevent_setup(struct device_node *node)
  259. {
  260. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  261. int ret;
  262. arc_timer_irq = irq_of_parse_and_map(node, 0);
  263. if (arc_timer_irq <= 0) {
  264. pr_err("clockevent: missing irq");
  265. return -EINVAL;
  266. }
  267. ret = arc_get_timer_clk(node);
  268. if (ret) {
  269. pr_err("clockevent: missing clk");
  270. return ret;
  271. }
  272. /* Needs apriori irq_set_percpu_devid() done in intc map function */
  273. ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
  274. "Timer0 (per-cpu-tick)", evt);
  275. if (ret) {
  276. pr_err("clockevent: unable to request irq\n");
  277. return ret;
  278. }
  279. ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
  280. "AP_ARC_TIMER_STARTING",
  281. arc_timer_starting_cpu,
  282. arc_timer_dying_cpu);
  283. if (ret) {
  284. pr_err("Failed to setup hotplug state");
  285. return ret;
  286. }
  287. return 0;
  288. }
  289. static int __init arc_of_timer_init(struct device_node *np)
  290. {
  291. static int init_count = 0;
  292. int ret;
  293. if (!init_count) {
  294. init_count = 1;
  295. ret = arc_clockevent_setup(np);
  296. } else {
  297. ret = arc_cs_setup_timer1(np);
  298. }
  299. return ret;
  300. }
  301. CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
/*
 * Called from start_kernel() - boot CPU only
 */
void __init time_init(void)
{
	of_clk_init(NULL);	/* init DT clocks first: timer setup reads rates via of_clk_get() */
	clocksource_probe();	/* run the CLOCKSOURCE_OF_DECLARE'd callbacks above */
}