sh_tmu.c

/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
        SH_TMU,
        SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
        struct sh_tmu_device *tmu;
        unsigned int index;

        void __iomem *base;
        int irq;

        unsigned long periodic;
        struct clock_event_device ced;
        struct clocksource cs;
        bool cs_enabled;
        unsigned int enable_count;
};

struct sh_tmu_device {
        struct platform_device *pdev;

        void __iomem *mapbase;
        struct clk *clk;
        unsigned long rate;

        enum sh_tmu_model model;

        raw_spinlock_t lock; /* Protect the shared start/stop register */

        struct sh_tmu_channel *channels;
        unsigned int num_channels;

        bool has_clockevent;
        bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF          (1 << 8)
#define TCR_UNIE         (1 << 5)
#define TCR_TPSC_CLK4    (0 << 0)
#define TCR_TPSC_CLK16   (1 << 0)
#define TCR_TPSC_CLK64   (2 << 0)
#define TCR_TPSC_CLK256  (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK    (7 << 0)
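
/*
 * Register layout: each channel exposes TCOR/TCNT/TCR at a 4-byte stride
 * from the channel base. TCR is a 16-bit register, TCOR and TCNT are
 * 32-bit. TSTR is an 8-bit start/stop register shared by all channels;
 * it sits at a different offset from the device base on SH3-style parts,
 * hence the model switch in the accessors below.
 */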

static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
        unsigned long offs;

        if (reg_nr == TSTR) {
                switch (ch->tmu->model) {
                case SH_TMU_SH3:
                        return ioread8(ch->tmu->mapbase + 2);
                case SH_TMU:
                        return ioread8(ch->tmu->mapbase + 4);
                }
        }

        offs = reg_nr << 2;

        if (reg_nr == TCR)
                return ioread16(ch->base + offs);
        else
                return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
                                unsigned long value)
{
        unsigned long offs;

        if (reg_nr == TSTR) {
                switch (ch->tmu->model) {
                case SH_TMU_SH3:
                        return iowrite8(value, ch->tmu->mapbase + 2);
                case SH_TMU:
                        return iowrite8(value, ch->tmu->mapbase + 4);
                }
        }

        offs = reg_nr << 2;

        if (reg_nr == TCR)
                iowrite16(value, ch->base + offs);
        else
                iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
        unsigned long flags, value;

        /* start stop register shared by multiple timer channels */
        raw_spin_lock_irqsave(&ch->tmu->lock, flags);
        value = sh_tmu_read(ch, TSTR);

        if (start)
                value |= 1 << ch->index;
        else
                value &= ~(1 << ch->index);

        sh_tmu_write(ch, TSTR, value);
        raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
        int ret;

        /* enable clock */
        ret = clk_enable(ch->tmu->clk);
        if (ret) {
                dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
                        ch->index);
                return ret;
        }

        /* make sure channel is disabled */
        sh_tmu_start_stop_ch(ch, 0);

        /* maximum timeout */
        sh_tmu_write(ch, TCOR, 0xffffffff);
        sh_tmu_write(ch, TCNT, 0xffffffff);

        /* configure channel to parent clock / 4, irq off */
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

        /* enable channel */
        sh_tmu_start_stop_ch(ch, 1);

        return 0;
}
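
/*
 * Channel enable/disable is reference counted: only the first enable
 * powers the channel up (runtime PM plus syscore marking) and only the
 * last disable powers it down. The clocksource suspend/resume callbacks
 * further below rely on this count to park and restart a channel across
 * a system power transition without changing the cs_enabled bookkeeping.
 */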

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
        if (ch->enable_count++ > 0)
                return 0;

        pm_runtime_get_sync(&ch->tmu->pdev->dev);
        dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

        return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
        /* disable channel */
        sh_tmu_start_stop_ch(ch, 0);

        /* disable interrupts in TMU block */
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

        /* stop clock */
        clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
        if (WARN_ON(ch->enable_count == 0))
                return;

        if (--ch->enable_count > 0)
                return;

        __sh_tmu_disable(ch);

        dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
        pm_runtime_put(&ch->tmu->pdev->dev);
}
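
/*
 * TCNT is a down-counter: it is loaded with a delta and raises the
 * underflow interrupt when it wraps. For periodic operation TCOR is set
 * to the same delta so the hardware reloads it automatically on each
 * underflow; for oneshot operation TCOR is parked at the maximum value.
 */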

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
                            int periodic)
{
        /* stop timer */
        sh_tmu_start_stop_ch(ch, 0);

        /* acknowledge interrupt */
        sh_tmu_read(ch, TCR);

        /* enable interrupt */
        sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        /* reload delta value in case of periodic timer */
        if (periodic)
                sh_tmu_write(ch, TCOR, delta);
        else
                sh_tmu_write(ch, TCOR, 0xffffffff);

        sh_tmu_write(ch, TCNT, delta);

        /* start timer */
        sh_tmu_start_stop_ch(ch, 1);
}
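
/*
 * Writing TCR with the UNF bit cleared acknowledges a pending underflow.
 * In oneshot mode the write also drops UNIE so the expired event is not
 * rearmed; in periodic mode UNIE is kept set and the hardware reload via
 * TCOR keeps the timer ticking.
 */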

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
        struct sh_tmu_channel *ch = dev_id;

        /* disable or acknowledge interrupt */
        if (clockevent_state_oneshot(&ch->ced))
                sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
        else
                sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        /* notify clockevent layer */
        ch->ced.event_handler(&ch->ced);
        return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
        return container_of(cs, struct sh_tmu_channel, cs);
}
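
/*
 * The clocksource core expects an up-counting value, while TCNT counts
 * down from 0xffffffff; XOR-ing the raw value with 0xffffffff inverts it
 * into a monotonically increasing 32-bit count.
 */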

static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
        int ret;

        if (WARN_ON(ch->cs_enabled))
                return 0;

        ret = sh_tmu_enable(ch);
        if (!ret)
                ch->cs_enabled = true;

        return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (WARN_ON(!ch->cs_enabled))
                return;

        sh_tmu_disable(ch);
        ch->cs_enabled = false;
}
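
/*
 * Suspend/resume adjust only enable_count and leave cs_enabled alone:
 * the channel is physically stopped across suspend but still considered
 * enabled by the clocksource core, so resume can restart it in place.
 */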

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (!ch->cs_enabled)
                return;

        if (--ch->enable_count == 0) {
                __sh_tmu_disable(ch);
                pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
        }
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (!ch->cs_enabled)
                return;

        if (ch->enable_count++ == 0) {
                pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
                __sh_tmu_enable(ch);
        }
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
                                       const char *name)
{
        struct clocksource *cs = &ch->cs;

        cs->name = name;
        cs->rating = 200;
        cs->read = sh_tmu_clocksource_read;
        cs->enable = sh_tmu_clocksource_enable;
        cs->disable = sh_tmu_clocksource_disable;
        cs->suspend = sh_tmu_clocksource_suspend;
        cs->resume = sh_tmu_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
                 ch->index);

        clocksource_register_hz(cs, ch->tmu->rate);
        return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_tmu_channel, ced);
}
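
/*
 * For periodic mode the reload value is the number of input clock cycles
 * per jiffy, rounded to the nearest integer: (rate + HZ/2) / HZ.
 */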

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
        sh_tmu_enable(ch);

        if (periodic) {
                ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
                sh_tmu_set_next(ch, ch->periodic, 1);
        }
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
                sh_tmu_disable(ch);
        return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
                                        int periodic)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        /* deal with old setting first */
        if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
                sh_tmu_disable(ch);

        dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
                 ch->index, periodic ? "periodic" : "oneshot");
        sh_tmu_clock_event_start(ch, periodic);
        return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
        return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
        return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        BUG_ON(!clockevent_state_oneshot(ced));

        /* program new delta value */
        sh_tmu_set_next(ch, delta, 0);
        return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
        pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
        pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
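
/*
 * The clockevent is registered with a minimum delta of 0x300 input
 * clocks (presumably enough headroom to cover reprogramming latency
 * before the new count underflows) and the full 32-bit range as the
 * maximum. IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING marks the
 * handler as a timer interrupt that must not be migrated between CPUs.
 */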

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
                                       const char *name)
{
        struct clock_event_device *ced = &ch->ced;
        int ret;

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 200;
        ced->cpumask = cpu_possible_mask;
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
        ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
        ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
        ced->suspend = sh_tmu_clock_event_suspend;
        ced->resume = sh_tmu_clock_event_resume;

        dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
                 ch->index);

        clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

        ret = request_irq(ch->irq, sh_tmu_interrupt,
                          IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
                          dev_name(&ch->tmu->pdev->dev), ch);
        if (ret) {
                dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
                        ch->index, ch->irq);
                return;
        }
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
                           bool clockevent, bool clocksource)
{
        if (clockevent) {
                ch->tmu->has_clockevent = true;
                sh_tmu_register_clockevent(ch, name);
        } else if (clocksource) {
                ch->tmu->has_clocksource = true;
                sh_tmu_register_clocksource(ch, name);
        }

        return 0;
}
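
/*
 * Per-channel register blocks are 12 bytes apart (TCOR, TCNT and the
 * 16-bit TCR plus padding). On SH3-style hardware the first block starts
 * 4 bytes into the device's I/O window, elsewhere 8 bytes in, with the
 * shared TSTR register sitting in the gap before the first channel.
 */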

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
                                bool clockevent, bool clocksource,
                                struct sh_tmu_device *tmu)
{
        /* Skip unused channels. */
        if (!clockevent && !clocksource)
                return 0;

        ch->tmu = tmu;
        ch->index = index;

        if (tmu->model == SH_TMU_SH3)
                ch->base = tmu->mapbase + 4 + ch->index * 12;
        else
                ch->base = tmu->mapbase + 8 + ch->index * 12;

        ch->irq = platform_get_irq(tmu->pdev, index);
        if (ch->irq < 0) {
                dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
                        ch->index);
                return ch->irq;
        }

        ch->cs_enabled = false;
        ch->enable_count = 0;

        return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
                               clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
        struct resource *res;

        res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
                return -ENXIO;
        }

        tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (tmu->mapbase == NULL)
                return -ENXIO;

        return 0;
}
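
/*
 * Illustrative sketch of a matching device tree node. Only the
 * "renesas,tmu" compatible, the "#renesas,channels" property read below
 * and the "fck" clock name come from this file; the unit address, reg
 * size and clock phandle are assumptions for the example:
 *
 *      tmu0: timer@ffd80000 {
 *              compatible = "renesas,tmu";
 *              reg = <0xffd80000 0x30>;
 *              interrupts = <...>;
 *              clocks = <&mstp0_clks 0>;
 *              clock-names = "fck";
 *              #renesas,channels = <3>;
 *      };
 */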

static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
        struct device_node *np = tmu->pdev->dev.of_node;

        tmu->model = SH_TMU;
        tmu->num_channels = 3;

        of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

        if (tmu->num_channels != 2 && tmu->num_channels != 3) {
                dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
                        tmu->num_channels);
                return -EINVAL;
        }

        return 0;
}
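
/*
 * All channels are programmed with the TPSC_CLK4 prescaler, so the
 * effective counting rate is the module clock divided by four; that is
 * the rate cached below and advertised to the clocksource and clockevent
 * cores.
 */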

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
        unsigned int i;
        int ret;

        tmu->pdev = pdev;

        raw_spin_lock_init(&tmu->lock);

        if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
                ret = sh_tmu_parse_dt(tmu);
                if (ret < 0)
                        return ret;
        } else if (pdev->dev.platform_data) {
                const struct platform_device_id *id = pdev->id_entry;
                struct sh_timer_config *cfg = pdev->dev.platform_data;

                tmu->model = id->driver_data;
                tmu->num_channels = hweight8(cfg->channels_mask);
        } else {
                dev_err(&tmu->pdev->dev, "missing platform data\n");
                return -ENXIO;
        }

        /* Get hold of clock. */
        tmu->clk = clk_get(&tmu->pdev->dev, "fck");
        if (IS_ERR(tmu->clk)) {
                dev_err(&tmu->pdev->dev, "cannot get clock\n");
                return PTR_ERR(tmu->clk);
        }

        ret = clk_prepare(tmu->clk);
        if (ret < 0)
                goto err_clk_put;

        /* Determine clock rate. */
        ret = clk_enable(tmu->clk);
        if (ret < 0)
                goto err_clk_unprepare;

        tmu->rate = clk_get_rate(tmu->clk) / 4;
        clk_disable(tmu->clk);

        /* Map the memory resource. */
        ret = sh_tmu_map_memory(tmu);
        if (ret < 0) {
                dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
                goto err_clk_unprepare;
        }

        /* Allocate and setup the channels. */
        tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
                                GFP_KERNEL);
        if (tmu->channels == NULL) {
                ret = -ENOMEM;
                goto err_unmap;
        }

        /*
         * Use the first channel as a clock event device and the second channel
         * as a clock source.
         */
        for (i = 0; i < tmu->num_channels; ++i) {
                ret = sh_tmu_channel_setup(&tmu->channels[i], i,
                                           i == 0, i == 1, tmu);
                if (ret < 0)
                        goto err_unmap;
        }

        platform_set_drvdata(pdev, tmu);

        return 0;

err_unmap:
        kfree(tmu->channels);
        iounmap(tmu->mapbase);
err_clk_unprepare:
        clk_unprepare(tmu->clk);
err_clk_put:
        clk_put(tmu->clk);
        return ret;
}
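
/*
 * On SuperH this driver can be probed twice: once very early through the
 * "earlytimer" early platform mechanism, before the driver model is up,
 * and again as a regular platform device. The second probe finds the
 * drvdata left by the first pass and only finishes the runtime PM setup.
 */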

static int sh_tmu_probe(struct platform_device *pdev)
{
        struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
        int ret;

        if (!is_early_platform_device(pdev)) {
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
        }

        if (tmu) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                goto out;
        }

        tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
        if (tmu == NULL)
                return -ENOMEM;

        ret = sh_tmu_setup(tmu, pdev);
        if (ret) {
                kfree(tmu);
                pm_runtime_idle(&pdev->dev);
                return ret;
        }

        if (is_early_platform_device(pdev))
                return 0;

out:
        if (tmu->has_clockevent || tmu->has_clocksource)
                pm_runtime_irq_safe(&pdev->dev);
        else
                pm_runtime_idle(&pdev->dev);

        return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
        { "sh-tmu", SH_TMU },
        { "sh-tmu-sh3", SH_TMU_SH3 },
        { }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
        { .compatible = "renesas,tmu" },
        { }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
        .probe          = sh_tmu_probe,
        .remove         = sh_tmu_remove,
        .driver         = {
                .name   = "sh_tmu",
                .of_match_table = of_match_ptr(sh_tmu_of_table),
        },
        .id_table       = sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
        return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
        platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");