/* kern_time.c */
  1. /* $OpenBSD: kern_time.c,v 1.91 2015/07/19 02:35:35 deraadt Exp $ */
  2. /* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
  3. /*
  4. * Copyright (c) 1982, 1986, 1989, 1993
  5. * The Regents of the University of California. All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the name of the University nor the names of its contributors
  16. * may be used to endorse or promote products derived from this software
  17. * without specific prior written permission.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  20. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  21. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  22. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  23. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  24. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  25. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  26. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  27. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  28. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  29. * SUCH DAMAGE.
  30. *
  31. * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
  32. */
  33. #include <sys/param.h>
  34. #include <sys/resourcevar.h>
  35. #include <sys/kernel.h>
  36. #include <sys/systm.h>
  37. #include <sys/proc.h>
  38. #include <sys/ktrace.h>
  39. #include <sys/vnode.h>
  40. #include <sys/signalvar.h>
  41. #include <sys/tame.h>
  42. #include <sys/timetc.h>
  43. #include <sys/mount.h>
  44. #include <sys/syscallargs.h>
/* Pending adjtime(2) correction, consumed by the timecounter code. */
int64_t adjtimedelta;		/* unapplied time correction (microseconds) */

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
/*
 * Set the realtime clock to *ts, with sanity and security checks.
 * Used by both clock_settime and settimeofday.  Returns 0 or EPERM.
 */
int
settime(struct timespec *ts)
{
	struct timespec now;

	/*
	 * Adjtime in progress is meaningless or harmful after
	 * setting the clock. Cancel adjtime and then set new time.
	 */
	adjtimedelta = 0;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 * how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back). This feature prevent interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}
/*
 * Read the current value of clock_id into *tp.
 * Returns 0 on success, EINVAL for an unknown clock, or ESRCH when a
 * pthread_getcpuclockid() clock refers to a thread outside p's process.
 */
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		/* uptime excludes time spent suspended (naptime) */
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		/*
		 * Accumulated process runtime plus the portion of the
		 * current run slice since this CPU started running it.
		 */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		/* same computation, but for this thread only */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = pfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			*tp = q->p_tu.tu_runtime;
		} else
			return (EINVAL);
	}
	return (0);
}
/*
 * clock_gettime(2): copy the requested clock's value out to userland,
 * recording the result for ktrace when structure tracing is enabled.
 */
/* ARGSUSED */
int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	/* zero first so no uninitialized stack bytes reach userland */
	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrabstimespec(p, &ats);
		KERNEL_UNLOCK();
	}
#endif
	return (error);
}
/*
 * clock_settime(2): set CLOCK_REALTIME for privileged callers.
 * All other clocks are read-only; returns EINVAL for them.
 */
/* ARGSUSED */
int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	/* root only: setting the clock is a privileged operation */
	if ((error = suser(p, 0)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}
  187. int
  188. sys_clock_getres(struct proc *p, void *v, register_t *retval)
  189. {
  190. struct sys_clock_getres_args /* {
  191. syscallarg(clockid_t) clock_id;
  192. syscallarg(struct timespec *) tp;
  193. } */ *uap = v;
  194. clockid_t clock_id;
  195. struct timespec ts;
  196. struct proc *q;
  197. int error = 0;
  198. memset(&ts, 0, sizeof(ts));
  199. clock_id = SCARG(uap, clock_id);
  200. switch (clock_id) {
  201. case CLOCK_REALTIME:
  202. case CLOCK_MONOTONIC:
  203. case CLOCK_UPTIME:
  204. case CLOCK_PROCESS_CPUTIME_ID:
  205. case CLOCK_THREAD_CPUTIME_ID:
  206. ts.tv_sec = 0;
  207. ts.tv_nsec = 1000000000 / hz;
  208. break;
  209. default:
  210. /* check for clock from pthread_getcpuclockid() */
  211. if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
  212. q = pfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
  213. if (q == NULL || q->p_p != p->p_p)
  214. return (ESRCH);
  215. ts.tv_sec = 0;
  216. ts.tv_nsec = 1000000000 / hz;
  217. } else
  218. return (EINVAL);
  219. }
  220. if (SCARG(uap, tp)) {
  221. error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
  222. #ifdef KTRACE
  223. if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
  224. KERNEL_LOCK();
  225. ktrreltimespec(p, &ts);
  226. KERNEL_UNLOCK();
  227. }
  228. #endif
  229. }
  230. return error;
  231. }
/*
 * nanosleep(2): sleep for the requested interval (rounded up to at
 * least one tick), optionally reporting the unslept remainder via
 * rmtp.  Returns 0, EINVAL for a malformed interval, EINTR when
 * interrupted by a signal, or a copyin/copyout error.
 */
/* ARGSUSED */
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;	/* dummy wait channel; nothing ever wakes it */
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt, rmt;
	struct timespec sts, ets;
	struct timespec *rmtp;
	struct timeval tv;
	int error, error1;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrreltimespec(p, &rqt);
		KERNEL_UNLOCK();
	}
#endif

	TIMESPEC_TO_TIMEVAL(&tv, &rqt);
	if (itimerfix(&tv))
		return (EINVAL);

	/* remember when we started, so the remainder can be computed */
	if (rmtp)
		getnanouptime(&sts);

	/* nothing signals nanowait, so this only ends by timeout or signal */
	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
	    MAX(1, tvtohz(&tv)));
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		getnanouptime(&ets);

		memset(&rmt, 0, sizeof(rmt));
		/* remainder = requested - (end - start), clamped at zero */
		timespecsub(&ets, &sts, &sts);
		timespecsub(&rqt, &sts, &rmt);

		if (rmt.tv_sec < 0)
			timespecclear(&rmt);

		error1 = copyout(&rmt, rmtp, sizeof(rmt));
		if (error1 != 0)
			error = error1;
#ifdef KTRACE
		if (error1 == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &rmt);
			KERNEL_UNLOCK();
		}
#endif
	}

	return error;
}
/*
 * gettimeofday(2): copy out the wall-clock time and/or the kernel
 * timezone.  Both pointers are optional.
 */
/* ARGSUSED */
int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		/* zero first so no uninitialized stack bytes leak */
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrabstimeval(p, &atv);
			KERNEL_UNLOCK();
		}
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}
/*
 * settimeofday(2): set the wall-clock time and/or the kernel timezone.
 * Requires root.  Both arguments are validated and copied in before
 * anything is changed, so a bad pointer cannot leave the clock and the
 * timezone half-updated.
 */
/* ARGSUSED */
int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p, 0)))
		return (error);

	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}
/*
 * adjfreq(2): read and/or set the timecounter frequency adjustment.
 * Reading the old value needs no privilege; setting a new one requires
 * root.  Either pointer may be NULL.
 */
/* ARGSUSED */
int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	/* report the current adjustment before applying any new one */
	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
	return (0);
}
/*
 * adjtime(2): read and/or set the pending gradual clock correction
 * (adjtimedelta, in microseconds).  Reading needs no privilege;
 * setting requires root.  Subject to the tame(2) policy check.
 */
/* ARGSUSED */
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	struct timeval atv;
	int error;

	if (tame_adjtime_check(p, delta))
		return (EPERM);

	if (olddelta) {
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = adjtimedelta / 1000000;
		atv.tv_usec = adjtimedelta % 1000000;
		/* normalize: C division truncates toward zero */
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}

	if (delta) {
		if ((error = suser(p, 0)))
			return (error);

		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);

		/* XXX Check for overflow? */
		adjtimedelta = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;
	}

	return (0);
}
/* Protects the virtual and profiling entries of ps_timer[]. */
struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below),
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timers .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	/* snapshot the timer field by field under the itimer mutex */
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}
  473. /* ARGSUSED */
  474. int
  475. sys_setitimer(struct proc *p, void *v, register_t *retval)
  476. {
  477. struct sys_setitimer_args /* {
  478. syscallarg(int) which;
  479. syscallarg(const struct itimerval *) itv;
  480. syscallarg(struct itimerval *) oitv;
  481. } */ *uap = v;
  482. struct sys_getitimer_args getargs;
  483. struct itimerval aitv;
  484. const struct itimerval *itvp;
  485. struct itimerval *oitv;
  486. struct process *pr = p->p_p;
  487. int error;
  488. int timo;
  489. int which;
  490. which = SCARG(uap, which);
  491. oitv = SCARG(uap, oitv);
  492. if (which < ITIMER_REAL || which > ITIMER_PROF)
  493. return (EINVAL);
  494. itvp = SCARG(uap, itv);
  495. if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
  496. sizeof(struct itimerval))))
  497. return (error);
  498. if (oitv != NULL) {
  499. SCARG(&getargs, which) = which;
  500. SCARG(&getargs, itv) = oitv;
  501. if ((error = sys_getitimer(p, &getargs, retval)))
  502. return (error);
  503. }
  504. if (itvp == 0)
  505. return (0);
  506. if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
  507. return (EINVAL);
  508. if (which == ITIMER_REAL) {
  509. struct timeval ctv;
  510. timeout_del(&pr->ps_realit_to);
  511. getmicrouptime(&ctv);
  512. if (timerisset(&aitv.it_value)) {
  513. timo = tvtohz(&aitv.it_value);
  514. timeout_add(&pr->ps_realit_to, timo);
  515. timeradd(&aitv.it_value, &ctv, &aitv.it_value);
  516. }
  517. pr->ps_timer[ITIMER_REAL] = aitv;
  518. } else {
  519. itimerround(&aitv.it_interval);
  520. mtx_enter(&itimer_mtx);
  521. pr->ps_timer[which] = aitv;
  522. mtx_leave(&itimer_mtx);
  523. }
  524. return (0);
  525. }
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);

	/* one-shot timer: disarm and stop */
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}

	/*
	 * Step it_value forward by it_interval until it lands in the
	 * future; overdue periods collapse into the single SIGALRM
	 * already sent above.
	 */
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			/* don't rearm for a process that is exiting */
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}
  561. /*
  562. * Check that a timespec value is legit
  563. */
  564. int
  565. timespecfix(struct timespec *ts)
  566. {
  567. if (ts->tv_sec < 0 || ts->tv_sec > 100000000 ||
  568. ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
  569. return (EINVAL);
  570. return (0);
  571. }
  572. /*
  573. * Check that a proposed value to load into the .it_value or
  574. * .it_interval part of an interval timer is acceptable.
  575. */
  576. int
  577. itimerfix(struct timeval *tv)
  578. {
  579. if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
  580. tv->tv_usec < 0 || tv->tv_usec >= 1000000)
  581. return (EINVAL);
  582. if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
  583. tv->tv_usec = tick;
  584. return (0);
  585. }
  586. /*
  587. * Nonzero timer interval smaller than the resolution of the
  588. * system clock are rounded up.
  589. */
  590. void
  591. itimerround(struct timeval *tv)
  592. {
  593. if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
  594. tv->tv_usec = tick;
  595. }
/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 *
 * Returns 1 while the timer is still running, 0 once it has expired.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		/* borrow one second so the subtraction below stays >= 0 */
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		/* reload, minus any overshoot carried in usec */
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}
  639. /*
  640. * ratecheck(): simple time-based rate-limit checking. see ratecheck(9)
  641. * for usage and rationale.
  642. */
  643. int
  644. ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
  645. {
  646. struct timeval tv, delta;
  647. int rv = 0;
  648. getmicrouptime(&tv);
  649. timersub(&tv, lasttime, &delta);
  650. /*
  651. * check for 0,0 is so that the message will be seen at least once,
  652. * even if interval is huge.
  653. */
  654. if (timercmp(&delta, mininterval, >=) ||
  655. (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
  656. *lasttime = tv;
  657. rv = 1;
  658. }
  659. return (rv);
  660. }
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Returns 1 when the event is within the maxpps budget (or the budget
 * is unlimited, maxpps < 0), 0 when it should be suppressed.  *curpps
 * counts events in the current one-second window.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * check for 0,0 is so that the message will be seen at least once.
	 * if more than one second have passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * we do increment *curpps even in *curpps < maxpps case, as some may
	 * try to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		/* new one-second window: restart the count */
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * assume that there's not too many calls to this function.
	 * not sure if the assumption holds, as it depends on *caller's*
	 * behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumption on the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}