/* kern_rwlock.c */
  1. /* $OpenBSD: kern_rwlock.c,v 1.27 2015/03/14 07:33:42 jsg Exp $ */
  2. /*
  3. * Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
  4. * Copyright (c) 2011 Thordur Bjornsson <thib@secnorth.net>
  5. *
  6. * Permission to use, copy, modify, and distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <sys/param.h>
  19. #include <sys/systm.h>
  20. #include <sys/proc.h>
  21. #include <sys/rwlock.h>
  22. #include <sys/limits.h>
  23. #include <sys/atomic.h>
  24. /* XXX - temporary measure until proc0 is properly aligned */
  25. #define RW_PROC(p) (((long)p) & ~RWLOCK_MASK)
  26. #ifdef MULTIPROCESSOR
  27. #define rw_cas(p, o, n) (atomic_cas_ulong(p, o, n) != o)
  28. #else
  29. static inline int
  30. rw_cas(volatile unsigned long *p, unsigned long o, unsigned long n)
  31. {
  32. if (*p != o)
  33. return (1);
  34. *p = n;
  35. return (0);
  36. }
  37. #endif
/*
 * Magic wand for lock operations. Every operation checks if certain
 * flags are set and if they aren't, it increments the lock with some
 * value (that might need some computing in a few cases). If the operation
 * fails, we need to set certain flags while waiting for the lock.
 *
 * RW_WRITE The lock must be completely empty. We increment it with
 * RWLOCK_WRLOCK and the proc pointer of the holder.
 * Sets RWLOCK_WAIT|RWLOCK_WRWANT while waiting.
 * RW_READ RWLOCK_WRLOCK|RWLOCK_WRWANT may not be set. We increment
 * with RWLOCK_READ_INCR. RWLOCK_WAIT while waiting.
 *
 * The table is indexed by (flags & RW_OPMASK) - 1 in rw_enter().
 */
static const struct rwlock_op {
	unsigned long inc;	/* value added to the owner word on success */
	unsigned long check;	/* bits that block the op while any is set */
	unsigned long wait_set;	/* bits set in the owner word while waiting */
	long proc_mult;		/* multiplier for RW_PROC(curproc) in inc */
	int wait_prio;		/* sleep priority while waiting */
} rw_ops[] = {
	{ /* RW_WRITE */
		RWLOCK_WRLOCK,
		ULONG_MAX,	/* any bit set means the lock is not free */
		RWLOCK_WAIT | RWLOCK_WRWANT,
		1,
		PLOCK - 4
	},
	{ /* RW_READ */
		RWLOCK_READ_INCR,
		RWLOCK_WRLOCK,
		RWLOCK_WAIT,
		0,
		PLOCK
	},
	{ /* Sparse Entry. */
		0,
	},
	{ /* RW_DOWNGRADE */
		/* inc converts one write hold into one read hold */
		RWLOCK_READ_INCR - RWLOCK_WRLOCK,
		0,		/* never blocks: caller holds the write lock */
		0,
		-1,		/* remove the holder's proc pointer */
		PLOCK
	},
};
/*
 * Acquire a read (shared) lock.
 *
 * Fast path: if no writer holds the lock, a single CAS adds one
 * reader (RWLOCK_READ_INCR).  On contention or CAS failure, fall
 * back to the generic slow path in rw_enter().
 */
void
rw_enter_read(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	if (__predict_false((owner & RWLOCK_WRLOCK) ||
	    rw_cas(&rwl->rwl_owner, owner, owner + RWLOCK_READ_INCR)))
		rw_enter(rwl, RW_READ);
	else
		membar_enter();	/* acquire barrier after taking the lock */
}
/*
 * Acquire a write (exclusive) lock.
 *
 * Fast path: the lock must be completely free (owner word 0); CAS in
 * our proc pointer with RWLOCK_WRLOCK set.  Otherwise fall back to
 * the generic slow path in rw_enter().
 */
void
rw_enter_write(struct rwlock *rwl)
{
	struct proc *p = curproc;

	if (__predict_false(rw_cas(&rwl->rwl_owner, 0,
	    RW_PROC(p) | RWLOCK_WRLOCK)))
		rw_enter(rwl, RW_WRITE);
	else
		membar_enter();	/* acquire barrier after taking the lock */
}
/*
 * Release a read lock.
 *
 * Fast path: if no waiters are flagged, CAS the reader count down by
 * one.  If RWLOCK_WAIT is set or the CAS fails, take the slow path in
 * rw_exit(), which also handles waking waiters.
 */
void
rw_exit_read(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	rw_assert_rdlock(rwl);

	membar_exit();	/* release barrier before dropping the lock */
	if (__predict_false((owner & RWLOCK_WAIT) ||
	    rw_cas(&rwl->rwl_owner, owner, owner - RWLOCK_READ_INCR)))
		rw_exit(rwl);
}
/*
 * Release a write lock.
 *
 * Fast path: if no waiters are flagged, CAS the owner word back to 0.
 * If RWLOCK_WAIT is set or the CAS fails, take the slow path in
 * rw_exit(), which also handles waking waiters.
 */
void
rw_exit_write(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	rw_assert_wrlock(rwl);

	membar_exit();	/* release barrier before dropping the lock */
	if (__predict_false((owner & RWLOCK_WAIT) ||
	    rw_cas(&rwl->rwl_owner, owner, 0)))
		rw_exit(rwl);
}
  122. #ifdef DIAGNOSTIC
  123. /*
  124. * Put the diagnostic functions here to keep the main code free
  125. * from ifdef clutter.
  126. */
  127. static void
  128. rw_enter_diag(struct rwlock *rwl, int flags)
  129. {
  130. switch (flags & RW_OPMASK) {
  131. case RW_WRITE:
  132. case RW_READ:
  133. if (RW_PROC(curproc) == RW_PROC(rwl->rwl_owner))
  134. panic("rw_enter: %s locking against myself",
  135. rwl->rwl_name);
  136. break;
  137. case RW_DOWNGRADE:
  138. /*
  139. * If we're downgrading, we must hold the write lock.
  140. */
  141. if ((rwl->rwl_owner & RWLOCK_WRLOCK) == 0)
  142. panic("rw_enter: %s downgrade of non-write lock",
  143. rwl->rwl_name);
  144. if (RW_PROC(curproc) != RW_PROC(rwl->rwl_owner))
  145. panic("rw_enter: %s downgrade, not holder",
  146. rwl->rwl_name);
  147. break;
  148. default:
  149. panic("rw_enter: unknown op 0x%x", flags);
  150. }
  151. }
  152. #else
  153. #define rw_enter_diag(r, f)
  154. #endif
  155. void
  156. rw_init(struct rwlock *rwl, const char *name)
  157. {
  158. rwl->rwl_owner = 0;
  159. rwl->rwl_name = name;
  160. }
/*
 * Generic slow-path acquisition for all lock operations.
 *
 * The rw_ops entry selected by the low bits of `flags' drives the
 * algorithm: sleep while any bit in op->check is set in the owner
 * word, then CAS in op->inc (plus the holder's proc pointer scaled
 * by op->proc_mult).  Returns 0 on success, or EBUSY (RW_NOSLEEP),
 * EAGAIN (RW_SLEEPFAIL) or a signal errno (RW_INTR).
 */
int
rw_enter(struct rwlock *rwl, int flags)
{
	const struct rwlock_op *op;
	struct sleep_state sls;
	unsigned long inc, o;
	int error;

	op = &rw_ops[(flags & RW_OPMASK) - 1];

	inc = op->inc + RW_PROC(curproc) * op->proc_mult;
retry:
	while (__predict_false(((o = rwl->rwl_owner) & op->check) != 0)) {
		unsigned long set = o | op->wait_set;
		int do_sleep;

		rw_enter_diag(rwl, flags);

		if (flags & RW_NOSLEEP)
			return (EBUSY);

		sleep_setup(&sls, rwl, op->wait_prio, rwl->rwl_name);
		if (flags & RW_INTR)
			sleep_setup_signal(&sls, op->wait_prio | PCATCH);

		/*
		 * Only sleep if we managed to flag the wait bits; if the
		 * CAS failed the owner changed under us, so re-check.
		 */
		do_sleep = !rw_cas(&rwl->rwl_owner, o, set);

		sleep_finish(&sls, do_sleep);
		if ((flags & RW_INTR) &&
		    (error = sleep_finish_signal(&sls)) != 0)
			return (error);
		if (flags & RW_SLEEPFAIL)
			return (EAGAIN);
	}

	if (__predict_false(rw_cas(&rwl->rwl_owner, o, o + inc)))
		goto retry;
	membar_enter();	/* acquire barrier after taking the lock */

	/*
	 * If old lock had RWLOCK_WAIT and RWLOCK_WRLOCK set, it means we
	 * downgraded a write lock and had possible read waiter, wake them
	 * to let them retry the lock.
	 */
	if (__predict_false((o & (RWLOCK_WRLOCK|RWLOCK_WAIT)) ==
	    (RWLOCK_WRLOCK|RWLOCK_WAIT)))
		wakeup(rwl);

	return (0);
}
/*
 * Generic slow-path release for both read and write locks.
 *
 * Loops re-reading the owner word and CASing in the released value:
 * 0 for a write lock, reader count minus one (with the wait/wrwant
 * bits cleared) for a read lock.  Wakes waiters that were flagged in
 * the word we replaced.
 */
void
rw_exit(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;
	int wrlock = owner & RWLOCK_WRLOCK;
	unsigned long set;

	if (wrlock)
		rw_assert_wrlock(rwl);
	else
		rw_assert_rdlock(rwl);

	membar_exit();	/* release barrier before dropping the lock */
	do {
		owner = rwl->rwl_owner;
		if (wrlock)
			set = 0;
		else
			set = (owner - RWLOCK_READ_INCR) &
				~(RWLOCK_WAIT|RWLOCK_WRWANT);
	} while (rw_cas(&rwl->rwl_owner, owner, set));

	if (owner & RWLOCK_WAIT)
		wakeup(rwl);
}
  223. int
  224. rw_status(struct rwlock *rwl)
  225. {
  226. if (rwl->rwl_owner & RWLOCK_WRLOCK) {
  227. if (RW_PROC(curproc) == RW_PROC(rwl->rwl_owner))
  228. return RW_WRITE;
  229. else
  230. return RW_WRITE_OTHER;
  231. }
  232. if (rwl->rwl_owner)
  233. return RW_READ;
  234. return (0);
  235. }
  236. #ifdef DIAGNOSTIC
/*
 * Panic unless the write lock is held by the current process.
 */
void
rw_assert_wrlock(struct rwlock *rwl)
{
	if (!(rwl->rwl_owner & RWLOCK_WRLOCK))
		panic("%s: lock not held", rwl->rwl_name);

	if (RWLOCK_OWNER(rwl) != (struct proc *)RW_PROC(curproc))
		panic("%s: lock not held by this process", rwl->rwl_name);
}
/*
 * Panic unless the lock is held shared (at least one reader, and
 * not write-locked).
 */
void
rw_assert_rdlock(struct rwlock *rwl)
{
	if (!RWLOCK_OWNER(rwl) || (rwl->rwl_owner & RWLOCK_WRLOCK))
		panic("%s: lock not shared", rwl->rwl_name);
}
/*
 * Panic unless the lock is completely free.
 */
void
rw_assert_unlocked(struct rwlock *rwl)
{
	if (rwl->rwl_owner != 0L)
		panic("%s: lock held", rwl->rwl_name);
}
  257. #endif
  258. /* recursive rwlocks; */
  259. void
  260. rrw_init(struct rrwlock *rrwl, char *name)
  261. {
  262. memset(rrwl, 0, sizeof(struct rrwlock));
  263. rw_init(&rrwl->rrwl_lock, name);
  264. }
  265. int
  266. rrw_enter(struct rrwlock *rrwl, int flags)
  267. {
  268. int rv;
  269. if (RWLOCK_OWNER(&rrwl->rrwl_lock) ==
  270. (struct proc *)RW_PROC(curproc)) {
  271. if (flags & RW_RECURSEFAIL)
  272. return (EDEADLK);
  273. else {
  274. rrwl->rrwl_wcnt++;
  275. return (0);
  276. }
  277. }
  278. rv = rw_enter(&rrwl->rrwl_lock, flags);
  279. if (rv == 0)
  280. rrwl->rrwl_wcnt = 1;
  281. return (rv);
  282. }
/*
 * Release a recursive rwlock.
 *
 * If the current process holds the write lock, drop one level of
 * recursion and only release the backing lock when the count reaches
 * zero.  A caller that is not the write holder must be releasing a
 * read hold, which carries no recursion count, so the backing lock
 * is released directly.
 */
void
rrw_exit(struct rrwlock *rrwl)
{
	if (RWLOCK_OWNER(&rrwl->rrwl_lock) ==
	    (struct proc *)RW_PROC(curproc)) {
		KASSERT(rrwl->rrwl_wcnt > 0);
		rrwl->rrwl_wcnt--;
		if (rrwl->rrwl_wcnt != 0)
			return;
	}

	rw_exit(&rrwl->rrwl_lock);
}
  295. int
  296. rrw_status(struct rrwlock *rrwl)
  297. {
  298. return (rw_status(&rrwl->rrwl_lock));
  299. }