adaptive_lock.c

/*
 * Copyright (c) 2017 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
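
/*
 * An adaptive lock is a mutual exclusion lock that adapts the waiting
 * policy of contending threads to the state of the owner: a waiter spins
 * as long as the owner is running on a processor, and sleeps on the
 * lock's sleep queue otherwise.
 *
 * Only the contended slow paths are implemented in this file. The fast
 * paths are expected to live in kern/adaptive_lock.h; judging from the
 * code below, they are presumably single compare-and-swap operations on
 * the lock word, roughly (an assumption, not part of this file):
 *
 *   acquire fast: CAS owner from 0 to thread_self (), slow path on failure
 *   release fast: CAS owner from thread_self () to 0, slow path on failure
 *
 * The lock word stores the address of the owner thread, with the
 * ADAPTIVE_LOCK_CONTENDED bit or'ed in while at least one thread is
 * spinning or sleeping in the acquire slow path.
 */
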
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <kern/adaptive_lock.h>
#include <kern/atomic.h>
#include <kern/clock.h>
#include <kern/init.h>
#include <kern/sleepq.h>
#include <kern/syscnt.h>
#include <kern/thread.h>
#include <machine/cpu.h>
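
/*
 * Extract the owner thread from the lock word, masking out the
 * contention bit.
 */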
static struct thread*
adaptive_lock_get_thread (uintptr_t owner)
{
  return ((struct thread *)(owner & ~ADAPTIVE_LOCK_CONTENDED));
}
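
/*
 * Mark the lock word as contended, so that the owner is forced into the
 * slow path when releasing the lock.
 */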
static void
adaptive_lock_set_contended (struct adaptive_lock *lock)
{
  atomic_or_rel (&lock->owner, ADAPTIVE_LOCK_CONTENDED);
}
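
/*
 * Return true if the given thread currently owns the lock, regardless of
 * the state of the contention bit.
 */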
static inline bool
adaptive_lock_is_owner (struct adaptive_lock *lock, struct thread *thread)
{
  uintptr_t prev = atomic_load_rlx (&lock->owner);
  return (adaptive_lock_get_thread (prev) == thread);
}
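
/*
 * Slow path of the acquire operation.
 *
 * The current thread lends its sleep queue to the lock, marks the lock as
 * contended, then repeatedly tries to install itself as the owner while
 * keeping the contended bit set. While another thread owns the lock, the
 * current thread busy-waits as long as that owner is running and, per
 * sleepq_test_circular (), spinning on it cannot result in a circular
 * wait; otherwise it sleeps on the sleep queue.
 */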
void
adaptive_lock_acquire_slow (struct adaptive_lock *lock)
{
  uintptr_t self = (uintptr_t) thread_self ();
  struct sleepq *sleepq = sleepq_lend (lock);

  adaptive_lock_set_contended (lock);

  while (1)
    {
      uintptr_t owner = atomic_cas_acq (&lock->owner, ADAPTIVE_LOCK_CONTENDED,
                                        self | ADAPTIVE_LOCK_CONTENDED);
      assert (owner & ADAPTIVE_LOCK_CONTENDED);

      _Auto thr = adaptive_lock_get_thread (owner);

      if (! thr)
        break;

      /*
       * The owner may not return from the unlock function if a thread is
       * spinning on it.
       */
      while (adaptive_lock_is_owner (lock, thr))
        {
          if (thread_is_running (thr) &&
              !sleepq_test_circular (sleepq, thread_wchan_addr (thr)))
            cpu_pause ();
          else
            sleepq_wait (sleepq, "adptlk");
        }
    }

  /*
   * Attempt to clear the contended bit.
   *
   * In case of success, the current thread becomes the new owner, and
   * simply checking if the sleep queue is empty is enough.
   *
   * Keep in mind accesses to the lock word aren't synchronized by
   * the sleep queue, i.e. an unlock may occur completely concurrently
   * while attempting to clear the contended bit.
   */
  if (sleepq_empty (sleepq))
    atomic_store_rlx (&lock->owner, self);

  sleepq_return (sleepq);
}
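
/*
 * Slow path of the release operation.
 *
 * The first loop removes the current thread from the lock word while
 * keeping the contended bit set; if the contended bit was cleared in the
 * meantime, the fast release path is retried instead. The second loop
 * then wakes up one waiter through the sleep queue, unless another thread
 * already took ownership or a timeout cleared the contended bit.
 */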
void
adaptive_lock_release_slow (struct adaptive_lock *lock)
{
  uintptr_t self = (uintptr_t) thread_self () | ADAPTIVE_LOCK_CONTENDED;

  while (1)
    {
      uintptr_t owner = atomic_cas_rel (&lock->owner, self,
                                        ADAPTIVE_LOCK_CONTENDED);

      if (owner == self)
        break;

      /*
       * The contended bit was cleared after the fast path failed,
       * but before the slow path (re)started.
       */
      assert (owner == (uintptr_t) thread_self ());

      if (adaptive_lock_release_fast (lock) != 0)
        continue;

      return;
    }

  while (1)
    {
      uintptr_t owner = atomic_load_rlx (&lock->owner);

      /*
       * This only happens if:
       *  1/ Another thread was able to become the new owner, in which
       *     case that thread isn't spinning on the current thread, i.e.
       *     there is no need for an additional reference.
       *  2/ A timeout cleared the contended bit.
       */
      if (owner != ADAPTIVE_LOCK_CONTENDED)
        break;

      /*
       * Avoid contending with incoming threads that are about to spin/wait
       * on the lock. This is particularly expensive with queued locks.
       *
       * Also, this call returns NULL if another thread is currently spinning
       * on the current thread, in which case the latter doesn't return,
       * averting the need for an additional reference.
       */
      struct sleepq *sleepq = sleepq_tryacquire (lock);

      if (sleepq)
        {
          sleepq_signal (sleepq);
          sleepq_release (sleepq);
          break;
        }

      /*
       * Acquiring the sleep queue may fail because of contention on
       * unrelated objects. Retry.
       */
    }
}
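
/*
 * Hypothetical usage sketch. The adaptive_lock_acquire () and
 * adaptive_lock_release () wrappers are assumed to be declared in
 * kern/adaptive_lock.h, taking the fast path and falling back to the
 * slow paths above; they are not defined in this file.
 *
 *   static struct adaptive_lock counter_lock;
 *   static unsigned long counter;
 *
 *   static void
 *   counter_inc (void)
 *   {
 *     adaptive_lock_acquire (&counter_lock);
 *     counter++;
 *     adaptive_lock_release (&counter_lock);
 *   }
 */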