exynos_mct.c

/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *	http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>

#define EXYNOS4_MCTREG(x) (x)
#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK (0xffffff00)

#define MCT_L_TCNTB_OFFSET (0x00)
#define MCT_L_ICNTB_OFFSET (0x08)
#define MCT_L_TCON_OFFSET (0x20)
#define MCT_L_INT_CSTAT_OFFSET (0x30)
#define MCT_L_INT_ENB_OFFSET (0x34)
#define MCT_L_WSTAT_OFFSET (0x40)

#define MCT_G_TCON_START (1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC (1 << 1)
#define MCT_G_TCON_COMP0_ENABLE (1 << 0)

#define MCT_L_TCON_INTERVAL_MODE (1 << 2)
#define MCT_L_TCON_INT_START (1 << 1)
#define MCT_L_TCON_TIMER_START (1 << 0)

#define TICK_BASE_CNT 1
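
/*
 * Register layout, as reflected by the offsets above: the MCT exposes one
 * 64-bit free-running global counter (G_CNT_*) with global comparators
 * (only comparator 0 is programmed by this driver), plus one local timer
 * block per CPU at 0x300 + 0x100 * cpu.  Each local timer provides a tick
 * count buffer (TCNTB) and an interrupt count buffer (ICNTB) used in
 * interval mode.
 */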

enum {
        MCT_INT_SPI,
        MCT_INT_PPI
};

enum {
        MCT_G0_IRQ,
        MCT_G1_IRQ,
        MCT_G2_IRQ,
        MCT_G3_IRQ,
        MCT_L0_IRQ,
        MCT_L1_IRQ,
        MCT_L2_IRQ,
        MCT_L3_IRQ,
        MCT_L4_IRQ,
        MCT_L5_IRQ,
        MCT_L6_IRQ,
        MCT_L7_IRQ,
        MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
        struct clock_event_device evt;
        unsigned long base;
        char name[10];
};
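
/*
 * MCT register writes are not applied immediately: each register has a
 * corresponding bit in a write-status (WSTAT) register that is set once the
 * value has propagated.  exynos4_mct_write() therefore polls the matching
 * WSTAT bit and clears it, panicking if the write never takes effect.
 */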

static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
        unsigned long stat_addr;
        u32 mask;
        u32 i;

        writel_relaxed(value, reg_base + offset);

        if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
                stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
                switch (offset & ~EXYNOS4_MCT_L_MASK) {
                case MCT_L_TCON_OFFSET:
                        mask = 1 << 3;		/* L_TCON write status */
                        break;
                case MCT_L_ICNTB_OFFSET:
                        mask = 1 << 1;		/* L_ICNTB write status */
                        break;
                case MCT_L_TCNTB_OFFSET:
                        mask = 1 << 0;		/* L_TCNTB write status */
                        break;
                default:
                        return;
                }
        } else {
                switch (offset) {
                case EXYNOS4_MCT_G_TCON:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 16;		/* G_TCON write status */
                        break;
                case EXYNOS4_MCT_G_COMP0_L:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 0;		/* G_COMP0_L write status */
                        break;
                case EXYNOS4_MCT_G_COMP0_U:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 1;		/* G_COMP0_U write status */
                        break;
                case EXYNOS4_MCT_G_COMP0_ADD_INCR:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
                        break;
                case EXYNOS4_MCT_G_CNT_L:
                        stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
                        mask = 1 << 0;		/* G_CNT_L write status */
                        break;
                case EXYNOS4_MCT_G_CNT_U:
                        stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
                        mask = 1 << 1;		/* G_CNT_U write status */
                        break;
                default:
                        return;
                }
        }

        /* Wait maximum 1 ms until written values are applied */
        for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
                if (readl_relaxed(reg_base + stat_addr) & mask) {
                        writel_relaxed(mask, reg_base + stat_addr);
                        return;
                }

        panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}
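
/*
 * The free-running global counter (FRC) backs three users at once: the
 * "mct-frc" clocksource, the scheduler clock and, on ARM, the
 * register_current_timer_delay() based delay loop.  Only the lower 32 bits
 * are read on the hot paths; exynos4_read_count_64() does the full
 * hi/lo/hi consistent 64-bit read.
 */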

/* Clocksource handling */
static void exynos4_mct_frc_start(void)
{
        u32 reg;

        reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
        reg |= MCT_G_TCON_START;
        exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

/**
 * exynos4_read_count_64 - Read all 64-bits of the global counter
 *
 * This will read all 64-bits of the global counter taking care to make sure
 * that the upper and lower half match. Note that reading the MCT can be quite
 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
 * only) version when possible.
 *
 * Returns the number of cycles in the global counter.
 */
static u64 exynos4_read_count_64(void)
{
        unsigned int lo, hi;
        u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);

        do {
                hi = hi2;
                lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
                hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
        } while (hi != hi2);

        return ((u64)hi << 32) | lo;
}

/**
 * exynos4_read_count_32 - Read the lower 32-bits of the global counter
 *
 * This will read just the lower 32-bits of the global counter. This is marked
 * as notrace so it can be used by the scheduler clock.
 *
 * Returns the number of cycles in the global counter (lower 32 bits).
 */
static u32 notrace exynos4_read_count_32(void)
{
        return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}

static u64 exynos4_frc_read(struct clocksource *cs)
{
        return exynos4_read_count_32();
}

static void exynos4_frc_resume(struct clocksource *cs)
{
        exynos4_mct_frc_start();
}

static struct clocksource mct_frc = {
        .name = "mct-frc",
        .rating = 450,	/* use value higher than ARM arch timer */
        .read = exynos4_frc_read,
        .mask = CLOCKSOURCE_MASK(32),
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume = exynos4_frc_resume,
};

static u64 notrace exynos4_read_sched_clock(void)
{
        return exynos4_read_count_32();
}

#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;

static cycles_t exynos4_read_current_timer(void)
{
        BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
                         "cycles_t needs to move to 32-bit for ARM64 usage");
        return exynos4_read_count_32();
}
#endif

static int __init exynos4_clocksource_init(void)
{
        exynos4_mct_frc_start();

#if defined(CONFIG_ARM)
        exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
        exynos4_delay_timer.freq = clk_rate;
        register_current_timer_delay(&exynos4_delay_timer);
#endif

        if (clocksource_register_hz(&mct_frc, clk_rate))
                panic("%s: can't register clocksource\n", mct_frc.name);

        sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);

        return 0;
}
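
/*
 * Global comparator 0 drives the "mct-comp" clock_event_device, which is
 * registered against CPU0.  In periodic mode the comparator auto-increments
 * by the programmed ADD_INCR value; in one-shot mode it fires once at
 * current count + cycles.
 */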

static void exynos4_mct_comp0_stop(void)
{
        unsigned int tcon;

        tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
        tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

        exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
        exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
        unsigned int tcon;
        u64 comp_cycle;

        tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);

        if (periodic) {
                tcon |= MCT_G_TCON_COMP0_AUTO_INC;
                exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
        }

        comp_cycle = exynos4_read_count_64() + cycles;
        exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
        exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

        exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

        tcon |= MCT_G_TCON_COMP0_ENABLE;
        exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
{
        exynos4_mct_comp0_start(false, cycles);

        return 0;
}

static int mct_set_state_shutdown(struct clock_event_device *evt)
{
        exynos4_mct_comp0_stop();
        return 0;
}

static int mct_set_state_periodic(struct clock_event_device *evt)
{
        unsigned long cycles_per_jiffy;

        cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
                            >> evt->shift);
        exynos4_mct_comp0_stop();
        exynos4_mct_comp0_start(true, cycles_per_jiffy);
        return 0;
}

static struct clock_event_device mct_comp_device = {
        .name = "mct-comp",
        .features = CLOCK_EVT_FEAT_PERIODIC |
                    CLOCK_EVT_FEAT_ONESHOT,
        .rating = 250,
        .set_next_event = exynos4_comp_set_next_event,
        .set_state_periodic = mct_set_state_periodic,
        .set_state_shutdown = mct_set_state_shutdown,
        .set_state_oneshot = mct_set_state_shutdown,
        .set_state_oneshot_stopped = mct_set_state_shutdown,
        .tick_resume = mct_set_state_shutdown,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

        evt->event_handler(evt);

        return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
        .name = "mct_comp_irq",
        .flags = IRQF_TIMER | IRQF_IRQPOLL,
        .handler = exynos4_mct_comp_isr,
        .dev_id = &mct_comp_device,
};

static int exynos4_clockevent_init(void)
{
        mct_comp_device.cpumask = cpumask_of(0);
        clockevents_config_and_register(&mct_comp_device, clk_rate,
                                        0xf, 0xffffffff);
        setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);

        return 0;
}

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
        unsigned long tmp;
        unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
        unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

        tmp = readl_relaxed(reg_base + offset);
        if (tmp & mask) {
                tmp &= ~mask;
                exynos4_mct_write(tmp, offset);
        }
}
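
/*
 * Local tick programming: the timer is first stopped, the interrupt count
 * buffer (ICNTB) is reloaded with the requested cycles (bit 31 requests the
 * manual update), the local interrupt is enabled, and the timer is restarted
 * in interval mode.
 */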

static void exynos4_mct_tick_start(unsigned long cycles,
                                   struct mct_clock_event_device *mevt)
{
        unsigned long tmp;

        exynos4_mct_tick_stop(mevt);

        tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

        /* update interrupt count buffer */
        exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

        /* enable MCT tick interrupt */
        exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

        tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
        tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
               MCT_L_TCON_INTERVAL_MODE;
        exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
        /* Clear the MCT tick interrupt */
        if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
                exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt;

        mevt = container_of(evt, struct mct_clock_event_device, evt);
        exynos4_mct_tick_start(cycles, mevt);
        return 0;
}

static int set_state_shutdown(struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt;

        mevt = container_of(evt, struct mct_clock_event_device, evt);
        exynos4_mct_tick_stop(mevt);
        exynos4_mct_tick_clear(mevt);
        return 0;
}

static int set_state_periodic(struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt;
        unsigned long cycles_per_jiffy;

        mevt = container_of(evt, struct mct_clock_event_device, evt);
        cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
                            >> evt->shift);
        exynos4_mct_tick_stop(mevt);
        exynos4_mct_tick_start(cycles_per_jiffy, mevt);
        return 0;
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
        struct mct_clock_event_device *mevt = dev_id;
        struct clock_event_device *evt = &mevt->evt;

        /*
         * This is for supporting oneshot mode.
         * Mct would generate interrupt periodically
         * without explicit stopping.
         */
        if (!clockevent_state_periodic(&mevt->evt))
                exynos4_mct_tick_stop(mevt);

        exynos4_mct_tick_clear(mevt);

        evt->event_handler(evt);

        return IRQ_HANDLED;
}
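
/*
 * CPU hotplug callbacks: each CPU that comes online configures its own local
 * timer block as a per-CPU clock_event_device.  With SPI interrupts every
 * local timer has its own IRQ, which is forced to the owning CPU; with PPI
 * interrupts a single per-CPU IRQ (MCT_L0_IRQ) is used instead.
 */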

static int exynos4_mct_starting_cpu(unsigned int cpu)
{
        struct mct_clock_event_device *mevt =
                per_cpu_ptr(&percpu_mct_tick, cpu);
        struct clock_event_device *evt = &mevt->evt;

        mevt->base = EXYNOS4_MCT_L_BASE(cpu);
        snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);

        evt->name = mevt->name;
        evt->cpumask = cpumask_of(cpu);
        evt->set_next_event = exynos4_tick_set_next_event;
        evt->set_state_periodic = set_state_periodic;
        evt->set_state_shutdown = set_state_shutdown;
        evt->set_state_oneshot = set_state_shutdown;
        evt->set_state_oneshot_stopped = set_state_shutdown;
        evt->tick_resume = set_state_shutdown;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->rating = 500;	/* use value higher than ARM arch timer */

        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

        if (mct_int_type == MCT_INT_SPI) {

                if (evt->irq == -1)
                        return -EIO;

                irq_force_affinity(evt->irq, cpumask_of(cpu));
                enable_irq(evt->irq);
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
        clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
                                        0xf, 0x7fffffff);

        return 0;
}

static int exynos4_mct_dying_cpu(unsigned int cpu)
{
        struct mct_clock_event_device *mevt =
                per_cpu_ptr(&percpu_mct_tick, cpu);
        struct clock_event_device *evt = &mevt->evt;

        evt->set_state_shutdown(evt);
        if (mct_int_type == MCT_INT_SPI) {
                if (evt->irq != -1)
                        disable_irq_nosync(evt->irq);
                exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
        } else {
                disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
        }
        return 0;
}
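
/*
 * One-time resource setup: obtain the tick ("fin_pll") and MCT bus clocks,
 * request the local timer interrupt(s) according to the interrupt type, and
 * install the hotplug callbacks above.  On failure the already requested
 * IRQs are released again.
 */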

static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
        int err, cpu;
        struct clk *mct_clk, *tick_clk;

        tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
                        clk_get(NULL, "fin_pll");
        if (IS_ERR(tick_clk))
                panic("%s: unable to determine tick clock rate\n", __func__);
        clk_rate = clk_get_rate(tick_clk);

        mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
        if (IS_ERR(mct_clk))
                panic("%s: unable to retrieve mct clock instance\n", __func__);
        clk_prepare_enable(mct_clk);

        reg_base = base;
        if (!reg_base)
                panic("%s: unable to ioremap mct address space\n", __func__);

        if (mct_int_type == MCT_INT_PPI) {
                err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
                                         exynos4_mct_tick_isr, "MCT",
                                         &percpu_mct_tick);
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     mct_irqs[MCT_L0_IRQ], err);
        } else {
                for_each_possible_cpu(cpu) {
                        int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
                        struct mct_clock_event_device *pcpu_mevt =
                                per_cpu_ptr(&percpu_mct_tick, cpu);

                        pcpu_mevt->evt.irq = -1;

                        irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
                        if (request_irq(mct_irq,
                                        exynos4_mct_tick_isr,
                                        IRQF_TIMER | IRQF_NOBALANCING,
                                        pcpu_mevt->name, pcpu_mevt)) {
                                pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
                                       cpu);

                                continue;
                        }
                        pcpu_mevt->evt.irq = mct_irq;
                }
        }

        /* Install hotplug callbacks which configure the timer on this CPU */
        err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
                                "clockevents/exynos4/mct_timer:starting",
                                exynos4_mct_starting_cpu,
                                exynos4_mct_dying_cpu);
        if (err)
                goto out_irq;

        return 0;

out_irq:
        if (mct_int_type == MCT_INT_PPI) {
                free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
        } else {
                for_each_possible_cpu(cpu) {
                        struct mct_clock_event_device *pcpu_mevt =
                                per_cpu_ptr(&percpu_mct_tick, cpu);

                        if (pcpu_mevt->evt.irq != -1) {
                                free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
                                pcpu_mevt->evt.irq = -1;
                        }
                }
        }
        return err;
}

static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
        u32 nr_irqs, i;
        int ret;

        mct_int_type = int_type;

        /* This driver uses only one global timer interrupt */
        mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

        /*
         * Find out the number of local irqs specified. The local
         * timer irqs are specified after the four global timer
         * irqs are specified.
         */
#ifdef CONFIG_OF
        nr_irqs = of_irq_count(np);
#else
        nr_irqs = 0;
#endif
        for (i = MCT_L0_IRQ; i < nr_irqs; i++)
                mct_irqs[i] = irq_of_parse_and_map(np, i);

        ret = exynos4_timer_resources(np, of_iomap(np, 0));
        if (ret)
                return ret;

        ret = exynos4_clocksource_init();
        if (ret)
                return ret;

        return exynos4_clockevent_init();
}

static int __init mct_init_spi(struct device_node *np)
{
        return mct_init_dt(np, MCT_INT_SPI);
}

static int __init mct_init_ppi(struct device_node *np)
{
        return mct_init_dt(np, MCT_INT_PPI);
}

TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);
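
/*
 * Illustrative sketch only: the driver is probed from a device-tree node
 * matching one of the compatibles above.  A minimal node might look roughly
 * like the following; the unit address, register size, interrupt specifiers
 * and clock phandles are placeholders and must be taken from the SoC's DTS
 * and the samsung,exynos4210-mct binding (only the "fin_pll" and "mct"
 * clock names are guaranteed by the code above).
 *
 *	mct@10050000 {
 *		compatible = "samsung,exynos4210-mct";
 *		reg = <0x10050000 0x800>;
 *		interrupts = <...>;		(4 global + per-CPU local IRQs)
 *		clocks = <&clock ...>, <&clock ...>;
 *		clock-names = "fin_pll", "mct";
 *	};
 */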