/* Locking in multithreaded situations.
   Copyright (C) 2005-2017 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2005.
   Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
   gthr-win32.h.  */

#include <config.h>

#include "glthread/lock.h"

/* ========================================================================= */

#if USE_POSIX_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

# if HAVE_PTHREAD_RWLOCK && (HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER || (defined PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP && (__GNU_LIBRARY__ > 1)))

#  ifdef PTHREAD_RWLOCK_INITIALIZER

#   if !HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER
/* glibc with bug https://sourceware.org/bugzilla/show_bug.cgi?id=13701 */

int
glthread_rwlock_init_for_glibc (pthread_rwlock_t *lock)
{
  pthread_rwlockattr_t attributes;
  int err;

  err = pthread_rwlockattr_init (&attributes);
  if (err != 0)
    return err;
  /* Note: PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP is the only value that
     causes the writer to be preferred.  PTHREAD_RWLOCK_PREFER_WRITER_NP does
     not do this; see
     http://man7.org/linux/man-pages/man3/pthread_rwlockattr_setkind_np.3.html */
  err = pthread_rwlockattr_setkind_np (&attributes,
                                       PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  if (err == 0)
    err = pthread_rwlock_init (lock, &attributes);
  /* pthread_rwlockattr_destroy always returns 0.  It cannot influence the
     return value.  */
  pthread_rwlockattr_destroy (&attributes);
  return err;
}

#   endif

#  else

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_rwlock_init (&lock->rwlock, NULL);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_rdlock (&lock->rwlock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_wrlock (&lock->rwlock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_rwlock_unlock (&lock->rwlock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_rwlock_destroy (&lock->rwlock);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else
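
/* In this fallback, a rwlock is emulated with one mutex, two condition
   variables and a counter.  The convention, as can be read off the code
   below: lock->runcount is the number of readers currently holding the lock
   when it is positive, 0 when the lock is free, and -1 while a single writer
   holds it.  */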

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->lock, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_readers, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_writers, NULL);
  if (err != 0)
    return err;
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
      if (err != 0)
        {
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
      if (err != 0)
        {
          lock->waiting_writers_count--;
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_destroy (&lock->lock);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_readers);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_writers);
  if (err != 0)
    return err;
  return 0;
}

# endif
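
/* Illustrative usage sketch (not part of this file): protecting a shared
   counter with the gl_rwlock_* macros declared in "glthread/lock.h".
   The identifiers 'counter_lock', 'counter' and 'peek_counter' are made up
   for this example.

     gl_rwlock_define_initialized (static, counter_lock)
     static int counter;

     static int
     peek_counter (void)
     {
       int value;
       gl_rwlock_rdlock (counter_lock);
       value = counter;
       gl_rwlock_unlock (counter_lock);
       return value;
     }

   A writer would bracket its update with gl_rwlock_wrlock (counter_lock)
   and gl_rwlock_unlock (counter_lock).  */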

/* --------------------- gl_recursive_lock_t datatype --------------------- */

# if HAVE_PTHREAD_MUTEX_RECURSIVE

#  if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (lock, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  return 0;
}

#  else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (&lock->recmutex, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_recursive_lock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_mutex_lock (&lock->recmutex);
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_mutex_unlock (&lock->recmutex);
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_mutex_destroy (&lock->recmutex);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->mutex, NULL);
  if (err != 0)
    return err;
  lock->owner = (pthread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  if (lock->owner != self)
    {
      int err;

      err = pthread_mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != pthread_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (pthread_t) 0;
      return pthread_mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (pthread_t) 0)
    return EBUSY;
  return pthread_mutex_destroy (&lock->mutex);
}

# endif
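
/* Illustrative usage sketch (not part of this file): a function that may
   re-enter itself in the same thread, assuming the gl_recursive_lock_*
   macros from "glthread/lock.h".  The identifiers 'state_lock' and
   'update_state' are made up for this example.

     gl_recursive_lock_define_initialized (static, state_lock)

     static void
     update_state (int depth)
     {
       gl_recursive_lock_lock (state_lock);
       if (depth > 0)
         update_state (depth - 1);
       gl_recursive_lock_unlock (state_lock);
     }
*/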

/* -------------------------- gl_once_t datatype -------------------------- */

static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a floating-point
     type, a pointer type, or a structure type.  */
  char *firstbyte = (char *) once_control;
  if (*firstbyte == *(const char *) &fresh_once)
    {
      /* First time use of once_control.  Invert the first byte.  */
      *firstbyte = ~ *(const char *) &fresh_once;
      return 1;
    }
  else
    return 0;
}
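
/* Illustrative usage sketch (not part of this file): one-time initialization
   through the gl_once macro from "glthread/lock.h", independent of which
   threads backend is compiled in.  The identifiers 'init_table_once',
   'init_table' and 'use_table' are made up for this example.

     gl_once_define (static, init_table_once)

     static void
     init_table (void)
     {
       ... fill in some global table exactly once ...
     }

     void
     use_table (void)
     {
       gl_once (init_table_once, init_table);
       ... the table is guaranteed to be initialized here ...
     }
*/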

#endif

/* ========================================================================= */

#if USE_PTH_THREADS

/* Use the GNU Pth threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

# if !HAVE_PTH_RWLOCK_ACQUIRE_PREFER_WRITER

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  if (!pth_mutex_init (&lock->lock))
    return errno;
  if (!pth_cond_init (&lock->waiting_readers))
    return errno;
  if (!pth_cond_init (&lock->waiting_writers))
    return errno;
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
  lock->initialized = 1;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    glthread_rwlock_init_multithreaded (lock);
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      if (!pth_cond_await (&lock->waiting_readers, &lock->lock, NULL))
        {
          int err = errno;
          pth_mutex_release (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    glthread_rwlock_init_multithreaded (lock);
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      if (!pth_cond_await (&lock->waiting_writers, &lock->lock, NULL))
        {
          int err = errno;
          lock->waiting_writers_count--;
          pth_mutex_release (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pth_mutex_release (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pth_mutex_release (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          if (!pth_cond_notify (&lock->waiting_writers, FALSE))
            {
              int err = errno;
              pth_mutex_release (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          if (!pth_cond_notify (&lock->waiting_readers, TRUE))
            {
              int err = errno;
              pth_mutex_release (&lock->lock);
              return err;
            }
        }
    }
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  lock->initialized = 0;
  return 0;
}

# endif

/* --------------------- gl_recursive_lock_t datatype --------------------- */

/* -------------------------- gl_once_t datatype -------------------------- */

static void
glthread_once_call (void *arg)
{
  void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
  void (*initfunction) (void) = *gl_once_temp_addr;
  initfunction ();
}

int
glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
{
  void (*temp) (void) = initfunction;
  return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
}

int
glthread_once_singlethreaded (pth_once_t *once_control)
{
  /* We know that pth_once_t is an integer type.  */
  if (*once_control == PTH_ONCE_INIT)
    {
      /* First time use of once_control.  Invert the marker.  */
      *once_control = ~ PTH_ONCE_INIT;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_SOLARIS_THREADS

/* Use the old Solaris threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
  if (err != 0)
    return err;
  lock->owner = (thread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  thread_t self = thr_self ();
  if (lock->owner != self)
    {
      int err;

      err = mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != thr_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (thread_t) 0;
      return mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (thread_t) 0)
    return EBUSY;
  return mutex_destroy (&lock->mutex);
}

/* -------------------------- gl_once_t datatype -------------------------- */

int
glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
{
  if (!once_control->inited)
    {
      int err;

      /* Use the mutex to guarantee that if another thread is already calling
         the initfunction, this thread waits until it's finished.  */
      err = mutex_lock (&once_control->mutex);
      if (err != 0)
        return err;
      if (!once_control->inited)
        {
          once_control->inited = 1;
          initfunction ();
        }
      return mutex_unlock (&once_control->mutex);
    }
  else
    return 0;
}

int
glthread_once_singlethreaded (gl_once_t *once_control)
{
  /* We know that gl_once_t contains an integer type.  */
  if (!once_control->inited)
    {
      /* First time use of once_control.  Invert the marker.  */
      once_control->inited = ~ 0;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_WINDOWS_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

void
glthread_lock_init_func (gl_lock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}
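
/* Lazy initialization protocol used by the Windows lock types below, as can
   be read off the code: lock->guard.done stays zero until the embedded
   CRITICAL_SECTION has been initialized.  The first thread whose
   InterlockedIncrement on lock->guard.started returns 0 performs the
   initialization; any other thread that arrives in the meantime spins with
   Sleep (0) until guard.done becomes nonzero.  */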

int
glthread_lock_lock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_unlock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_destroy_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* In this file, the waitqueues are implemented as circular arrays.  */
#define gl_waitqueue_t gl_carray_waitqueue_t

static void
gl_waitqueue_init (gl_waitqueue_t *wq)
{
  wq->array = NULL;
  wq->count = 0;
  wq->alloc = 0;
  wq->offset = 0;
}

/* Enqueues the current thread, represented by an event, in a wait queue.
   Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
static HANDLE
gl_waitqueue_add (gl_waitqueue_t *wq)
{
  HANDLE event;
  unsigned int index;

  if (wq->count == wq->alloc)
    {
      unsigned int new_alloc = 2 * wq->alloc + 1;
      HANDLE *new_array =
        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
      if (new_array == NULL)
        /* No more memory.  */
        return INVALID_HANDLE_VALUE;
      /* Now is a good opportunity to rotate the array so that its contents
         start at offset 0.  */
      if (wq->offset > 0)
        {
          unsigned int old_count = wq->count;
          unsigned int old_alloc = wq->alloc;
          unsigned int old_offset = wq->offset;
          unsigned int i;
          if (old_offset + old_count > old_alloc)
            {
              unsigned int limit = old_offset + old_count - old_alloc;
              for (i = 0; i < limit; i++)
                new_array[old_alloc + i] = new_array[i];
            }
          for (i = 0; i < old_count; i++)
            new_array[i] = new_array[old_offset + i];
          wq->offset = 0;
        }
      wq->array = new_array;
      wq->alloc = new_alloc;
    }
  /* Whether the created event is a manual-reset one or an auto-reset one
     does not matter, since we will wait on it only once.  */
  event = CreateEvent (NULL, TRUE, FALSE, NULL);
  if (event == NULL)
    /* No way to allocate an event.  (CreateEvent returns NULL, not
       INVALID_HANDLE_VALUE, on failure.)  */
    return INVALID_HANDLE_VALUE;
  index = wq->offset + wq->count;
  if (index >= wq->alloc)
    index -= wq->alloc;
  wq->array[index] = event;
  wq->count++;
  return event;
}
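
/* Example of the rotation above (illustrative numbers): with alloc = 4,
   offset = 3 and count = 3, the queue occupies indices 3, 0, 1.  Growing to
   new_alloc = 9, the wrapped tail (indices 0 and 1) is first copied to
   indices 4 and 5, then the whole contents is shifted down so that it
   occupies indices 0..2 and offset becomes 0.  */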

/* Notifies the first thread from a wait queue and dequeues it.  */
static void
gl_waitqueue_notify_first (gl_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}

/* Notifies all threads from a wait queue and dequeues them all.  */
static void
gl_waitqueue_notify_all (gl_waitqueue_t *wq)
{
  unsigned int i;

  for (i = 0; i < wq->count; i++)
    {
      unsigned int index = wq->offset + i;
      if (index >= wq->alloc)
        index -= wq->alloc;
      SetEvent (wq->array[index]);
    }
  wq->count = 0;
  wq->offset = 0;
}

void
glthread_rwlock_init_func (gl_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  gl_waitqueue_init (&lock->waiting_readers);
  gl_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  lock->guard.done = 1;
}

int
glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_unlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_destroy_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  if (lock->runcount != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  if (lock->waiting_readers.array != NULL)
    free (lock->waiting_readers.array);
  if (lock->waiting_writers.array != NULL)
    free (lock->waiting_writers.array);
  lock->guard.done = 0;
  return 0;
}

/* --------------------- gl_recursive_lock_t datatype --------------------- */

void
glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
{
  lock->owner = 0;
  lock->depth = 0;
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_recursive_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  {
    DWORD self = GetCurrentThreadId ();
    if (lock->owner != self)
      {
        EnterCriticalSection (&lock->lock);
        lock->owner = self;
      }
    if (++(lock->depth) == 0) /* wraparound? */
      {
        lock->depth--;
        return EAGAIN;
      }
  }
  return 0;
}

int
glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != GetCurrentThreadId ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = 0;
      LeaveCriticalSection (&lock->lock);
    }
  return 0;
}

int
glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* -------------------------- gl_once_t datatype -------------------------- */
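
/* State protocol of the Windows gl_once_t implementation, as can be read off
   the code below: once_control->inited stays negative until initialization
   gets under way; the first thread (the one whose InterlockedIncrement on
   once_control->started returns 0) creates the critical section, sets inited
   to 0 while the initfunction runs, and to 1 once it has finished.  Late
   arrivals spin until inited is no longer negative and then synchronize on
   the critical section before proceeding.  */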

void
glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}

#endif

/* ========================================================================= */