rtmutex.c

/*
 * Copyright (c) 2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/rtmutex.h>
#include <kern/rtmutex_i.h>
#include <kern/rtmutex_types.h>
#include <kern/syscnt.h>
#include <kern/thread.h>
#include <kern/turnstile.h>

#ifdef CONFIG_MUTEX_DEBUG
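
/*
 * Statistics counters, registered with syscnt, used to observe how often
 * the slow paths and the various downgrade cases are taken.
 */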
enum {
    RTMUTEX_SC_WAIT_SUCCESSES,
    RTMUTEX_SC_WAIT_ERRORS,
    RTMUTEX_SC_DOWNGRADES,
    RTMUTEX_SC_ERROR_DOWNGRADES,
    RTMUTEX_SC_CANCELED_DOWNGRADES,
    RTMUTEX_NR_SCS
};

static struct syscnt rtmutex_sc_array[RTMUTEX_NR_SCS];

static void
rtmutex_register_sc(unsigned int index, const char *name)
{
    assert(index < ARRAY_SIZE(rtmutex_sc_array));
    syscnt_register(&rtmutex_sc_array[index], name);
}

static void
rtmutex_setup_debug(void)
{
    rtmutex_register_sc(RTMUTEX_SC_WAIT_SUCCESSES,
                        "rtmutex_wait_successes");
    rtmutex_register_sc(RTMUTEX_SC_WAIT_ERRORS,
                        "rtmutex_wait_errors");
    rtmutex_register_sc(RTMUTEX_SC_DOWNGRADES,
                        "rtmutex_downgrades");
    rtmutex_register_sc(RTMUTEX_SC_ERROR_DOWNGRADES,
                        "rtmutex_error_downgrades");
    rtmutex_register_sc(RTMUTEX_SC_CANCELED_DOWNGRADES,
                        "rtmutex_canceled_downgrades");
}

static void
rtmutex_inc_sc(unsigned int index)
{
    assert(index < ARRAY_SIZE(rtmutex_sc_array));
    syscnt_inc(&rtmutex_sc_array[index]);
}

#else /* CONFIG_MUTEX_DEBUG */

#define rtmutex_setup_debug()
#define rtmutex_inc_sc(x)

#endif /* CONFIG_MUTEX_DEBUG */
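
/*
 * The owner word combines the owning thread pointer with the
 * RTMUTEX_CONTENDED and RTMUTEX_FORCE_WAIT flags declared in the rtmutex
 * headers; RTMUTEX_OWNER_MASK strips the flags to recover the pointer.
 */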
static struct thread *
rtmutex_get_thread(uintptr_t owner)
{
    return (struct thread *)(owner & RTMUTEX_OWNER_MASK);
}

static void
rtmutex_set_contended(struct rtmutex *rtmutex)
{
    atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_RELEASE);
}
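
/*
 * Slow path shared by the untimed and timed lock operations.
 *
 * A turnstile is lent to the mutex and the contended bit is set so that
 * releasing the lock goes through the unlock slow path. The acquisition
 * loop then attempts to install the calling thread as the owner while
 * keeping the contention bits set, waiting on the turnstile as long as
 * another thread holds the lock.
 */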
static int
rtmutex_lock_slow_common(struct rtmutex *rtmutex, bool timed, uint64_t ticks)
{
    struct turnstile *turnstile;
    uintptr_t self, owner;
    struct thread *thread;
    uintptr_t bits;
    int error;

    error = 0;
    self = (uintptr_t)thread_self();

    turnstile = turnstile_lend(rtmutex);

    rtmutex_set_contended(rtmutex);

    bits = RTMUTEX_CONTENDED;
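
    /*
     * Attempt to take ownership while preserving the contention bits.
     * On failure, wait on the turnstile behind the current owner, which
     * lets the waiter propagate its priority to that owner.
     */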
    for (;;) {
        owner = atomic_cas(&rtmutex->owner, bits, self | bits, ATOMIC_ACQUIRE);
        assert((owner & bits) == bits);

        if (owner == bits) {
            break;
        }

        thread = rtmutex_get_thread(owner);

        if (!timed) {
            turnstile_wait(turnstile, "rtmutex", thread);
        } else {
            error = turnstile_timedwait(turnstile, "rtmutex", thread, ticks);

            if (error) {
                break;
            }
        }

        bits |= RTMUTEX_FORCE_WAIT;
    }

    if (error) {
        rtmutex_inc_sc(RTMUTEX_SC_WAIT_ERRORS);

        /*
         * Keep in mind more than one thread may have timed out on waiting.
         * These threads aren't considered waiters, making the turnstile
         * potentially empty. The first to reacquire the turnstile clears
         * the contention bits, allowing the owner to unlock through the
         * fast path.
         */
        if (turnstile_empty(turnstile)) {
            owner = atomic_load(&rtmutex->owner, ATOMIC_RELAXED);

            if (owner & RTMUTEX_CONTENDED) {
                rtmutex_inc_sc(RTMUTEX_SC_ERROR_DOWNGRADES);
                owner &= RTMUTEX_OWNER_MASK;
                atomic_store(&rtmutex->owner, owner, ATOMIC_RELAXED);
            } else {
                rtmutex_inc_sc(RTMUTEX_SC_CANCELED_DOWNGRADES);
            }
        }

        goto out;
    }
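
    /*
     * The lock was acquired. Take ownership of the turnstile and, if no
     * other waiter remains, clear the contention bits so that the next
     * unlock can use the fast path again.
     */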
    rtmutex_inc_sc(RTMUTEX_SC_WAIT_SUCCESSES);

    turnstile_own(turnstile);

    if (turnstile_empty(turnstile)) {
        rtmutex_inc_sc(RTMUTEX_SC_DOWNGRADES);
        owner = atomic_swap(&rtmutex->owner, self, ATOMIC_RELAXED);
        assert(owner == (self | bits));
    }

out:
    turnstile_return(turnstile);

    /*
     * A lock owner should never perform priority propagation on itself,
     * because this process is done using its own priority, potentially
     * introducing unbounded priority inversion.
     * Instead, let new waiters do it, using their own priority.
     */

    return error;
}
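
/*
 * An untimed wait on a turnstile cannot fail, so the slow path of the
 * plain lock operation must always succeed.
 */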
void
rtmutex_lock_slow(struct rtmutex *rtmutex)
{
    int error;

    error = rtmutex_lock_slow_common(rtmutex, false, 0);
    assert(!error);
}

int
rtmutex_timedlock_slow(struct rtmutex *rtmutex, uint64_t ticks)
{
    return rtmutex_lock_slow_common(rtmutex, true, ticks);
}
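
/*
 * Slow path for the unlock operation.
 *
 * If no turnstile is attached to the mutex (all waiters may have timed
 * out and left), try the fast release and retry on contention. Otherwise,
 * release the lock while keeping the contention bits set, then wake up
 * one waiter, which acquires the lock through the lock slow path.
 */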
void
rtmutex_unlock_slow(struct rtmutex *rtmutex)
{
    struct turnstile *turnstile;
    uintptr_t owner;

    for (;;) {
        turnstile = turnstile_acquire(rtmutex);

        if (turnstile != NULL) {
            break;
        }

        owner = rtmutex_unlock_fast(rtmutex);

        if (!(owner & RTMUTEX_CONTENDED)) {
            goto out;
        }
    }
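
    /*
     * Hand the lock over: leave the contention bits set so that waiters
     * keep using the slow path, then wake up one of them.
     */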
    owner = atomic_swap(&rtmutex->owner,
                        RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED,
                        ATOMIC_RELEASE);
    assert(rtmutex_get_thread(owner) == thread_self());

    turnstile_disown(turnstile);
    turnstile_signal(turnstile);

    turnstile_release(turnstile);

out:
    thread_propagate_priority();
}

static int
rtmutex_bootstrap(void)
{
    return 0;
}

INIT_OP_DEFINE(rtmutex_bootstrap,
               INIT_OP_DEP(thread_setup_booter, true));

static int
rtmutex_setup(void)
{
    rtmutex_setup_debug();
    return 0;
}
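
/*
 * The syscnt dependency is only required when the debugging counters are
 * built in.
 */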
#ifdef CONFIG_MUTEX_DEBUG
#define RTMUTEX_DEBUG_INIT_OPS \
    INIT_OP_DEP(syscnt_setup, true),
#else /* CONFIG_MUTEX_DEBUG */
#define RTMUTEX_DEBUG_INIT_OPS
#endif /* CONFIG_MUTEX_DEBUG */

INIT_OP_DEFINE(rtmutex_setup,
               INIT_OP_DEP(rtmutex_bootstrap, true),
               RTMUTEX_DEBUG_INIT_OPS
);