/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"
/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult: pointer to mult variable
 * @shift: pointer to shift variable
 * @from: frequency to convert from
 * @to: frequency to convert to
 * @maxsec: guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
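/*
 * Worked example (illustrative only, not part of the original source): for a
 * hypothetical 1 MHz counter converted to nanoseconds over a 600 second
 * range,
 *
 *	u32 mult, shift;
 *	clocks_calc_mult_shift(&mult, &shift, 1000000, NSEC_PER_SEC, 600);
 *
 * yields mult = 4194304000 and shift = 22, so a cycle delta converts as
 * ns = (cycles * 4194304000) >> 22 = cycles * 1000, i.e. exactly 1000 ns per
 * cycle, and the intermediate 64-bit product cannot overflow for deltas of
 * up to 600 seconds.
 */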
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
			pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
				cs->name);
			pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn(" '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
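/*
 * Illustrative note (not in the original source): with the defaults above,
 * the watchdog timer fires roughly every 0.5 s (WATCHDOG_INTERVAL) and the
 * two deltas computed per watched clocksource cover that same interval. If
 * a watched clocksource and the watchdog disagree by more than
 * WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4 = 62500000 ns, i.e. 62.5 ms) over
 * one interval, the clocksource is declared unstable, its rating is later
 * dropped to 0 by the watchdog kthread, and a replacement is selected.
 */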
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}
/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 *
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
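/*
 * Rough example (illustrative only, not part of the original source): a
 * hypothetical 1 GHz clocksource with a 32-bit mask ends up with
 * mult = 0x80000000 and shift = 31, i.e. 1 ns per cycle. Its ~4.29e9 cycle
 * mask is the limiting factor here, which corresponds to roughly 3.8
 * seconds of coverage with the reduced (mult - maxadj) multiplier; after
 * the 50% safety margin this function reports a maximum deferment of
 * roughly 1.9 seconds.
 */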
/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 *
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			pr_warn("Override clocksource %s is not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
				cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;

		break;
	}

	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	return __clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	return __clocksource_select(true);
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */

static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place, where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}
/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new frequency
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
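/*
 * Usage sketch (illustrative only, not part of the original source): a
 * hypothetical driver with a free-running 32-bit counter at 1 MHz would
 * typically fill in a struct clocksource and register it through the
 * clocksource_register_hz() wrapper, which calls this function with
 * scale = 1. The names example_read, example_cs and example_counter_base
 * below are invented for the sketch:
 *
 *	static cycle_t example_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)readl(example_counter_base);
 *	}
 *
 *	static struct clocksource example_cs = {
 *		.name	= "example",
 *		.rating	= 200,
 *		.read	= example_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&example_cs, 1000000);
 *
 * The mult/shift pair, maxadj and max_idle_ns are all derived here from the
 * frequency, so the driver does not set them by hand.
 */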
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs: clocksource to be changed
 * @rating: new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	/*
	 * I really can't convince myself to support this on hardware
	 * designed by lobotomized monkeys.
	 */
	if (clocksource_is_watchdog(cs))
		return -EBUSY;

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);
#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}
/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
/**
 * sysfs_unbind_clocksource - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of the clocksource to unbind
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t sysfs_unbind_clocksource(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksources
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				"%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
/*
 * Sysfs setup bits:
 */
static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource);

static DEVICE_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_current_clocksource);
	if (!error)
		error = device_create_file(&device_clocksource,
					   &dev_attr_unbind_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */
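/*
 * Usage note (illustrative only, not part of the original source): the
 * attributes created above appear under
 * /sys/devices/system/clocksource/clocksource0/. For example:
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	# cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * Writing a name to current_clocksource sets override_name and triggers a
 * reselection; writing a name to unbind_clocksource unregisters that
 * clocksource if a replacement can be installed.
 */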
/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
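/*
 * Usage note (illustrative only, not part of the original source): the same
 * override can be requested at boot time, e.g. by adding "clocksource=hpet"
 * to the kernel command line. The deprecated "clock=" option is still
 * accepted and mapped onto "clocksource=" by boot_override_clock() above.
 */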