/* kern/rtmutex.h */
  1. /*
  2. * Copyright (c) 2017 Richard Braun.
  3. *
  4. * This program is free software: you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation, either version 3 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  16. *
  17. *
  18. * Real-time mutual exclusion locks.
  19. *
  20. * A real-time mutex is similar to a regular mutex, except priority
  21. * inheritance is unconditionally enabled.
  22. */
  23. #ifndef KERN_RTMUTEX_H
  24. #define KERN_RTMUTEX_H
  25. #include <assert.h>
  26. #include <errno.h>
  27. #include <stdbool.h>
  28. #include <stdint.h>
  29. #include <kern/atomic.h>
  30. #include <kern/init.h>
  31. #include <kern/macros.h>
  32. #include <kern/rtmutex_types.h>
  33. #include <kern/thread.h>
/*
 * Real-time mutex flags.
 *
 * The owner word stores the owning thread's address; because thread
 * structures are aligned, the two low bits are free for these flags.
 *
 * The "contended" flag indicates that threads are waiting for the mutex
 * to be unlocked. It forces threads trying to lock the mutex as well as
 * the owner to take the slow path.
 *
 * The "force-wait" flag prevents "stealing" a mutex. When a contended
 * mutex is unlocked, a thread may concurrently try to lock it. Without
 * this flag, it may succeed, and in doing so, it would prevent a
 * potentially higher priority thread from locking the mutex. The flag
 * forces all threads to not only take the slow path, but to also call
 * the turnstile wait function so that only the highest priority thread
 * may lock the mutex.
 */
#define RTMUTEX_CONTENDED ((uintptr_t)0x1)
#define RTMUTEX_FORCE_WAIT ((uintptr_t)0x2)

// Mask selecting the owner thread address bits of the owner word.
#define RTMUTEX_OWNER_MASK \
(~((uintptr_t)(RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED)))
  53. static inline bool
  54. rtmutex_owner_aligned (uintptr_t owner)
  55. {
  56. return ((owner & ~RTMUTEX_OWNER_MASK) == 0);
  57. }
  58. static inline uintptr_t
  59. rtmutex_lock_fast (struct rtmutex *rtmutex)
  60. {
  61. uintptr_t owner = (uintptr_t)thread_self ();
  62. assert (rtmutex_owner_aligned (owner));
  63. return (atomic_cas (&rtmutex->owner, 0, owner, ATOMIC_ACQUIRE));
  64. }
  65. static inline uintptr_t
  66. rtmutex_unlock_fast (struct rtmutex *rtmutex)
  67. {
  68. uintptr_t owner = (uintptr_t)thread_self ();
  69. assert (rtmutex_owner_aligned (owner));
  70. uintptr_t prev_owner = atomic_cas (&rtmutex->owner, owner, 0,
  71. ATOMIC_RELEASE);
  72. assert ((prev_owner & RTMUTEX_OWNER_MASK) == owner);
  73. return (prev_owner);
  74. }
// Slow paths, implemented out of line, taken when the fast-path CAS fails.
void rtmutex_lock_slow (struct rtmutex *rtmutex);
int rtmutex_timedlock_slow (struct rtmutex *rtmutex, uint64_t ticks);
void rtmutex_unlock_slow (struct rtmutex *rtmutex);
  78. static inline bool
  79. rtmutex_locked (const struct rtmutex *rtmutex)
  80. {
  81. uintptr_t owner = atomic_load_rlx (&rtmutex->owner);
  82. return (owner != 0);
  83. }
// Initialize a real-time mutex.
static inline void
rtmutex_init (struct rtmutex *rtmutex)
{
  // Zero means unowned: no owner thread address, no flag bits.
  rtmutex->owner = 0;
}
  90. /*
  91. * Attempt to lock the given real-time mutex.
  92. *
  93. * This function may not sleep.
  94. *
  95. * Return 0 on success, EBUSY if the mutex is already locked.
  96. */
  97. static inline int
  98. rtmutex_trylock (struct rtmutex *rtmutex)
  99. {
  100. uintptr_t prev_owner = rtmutex_lock_fast (rtmutex);
  101. return (prev_owner ? EBUSY : 0);
  102. }
  103. /*
  104. * Lock a real-time mutex.
  105. *
  106. * If the mutex is already locked, the calling thread sleeps until the
  107. * mutex is unlocked, and its priority is propagated as needed to prevent
  108. * unbounded priority inversion.
  109. *
  110. * A mutex can only be locked once.
  111. *
  112. * This function may sleep.
  113. */
  114. static inline void
  115. rtmutex_lock (struct rtmutex *rtmutex)
  116. {
  117. uintptr_t prev_owner = rtmutex_lock_fast (rtmutex);
  118. if (unlikely (prev_owner))
  119. rtmutex_lock_slow (rtmutex);
  120. }
  121. /*
  122. * Lock a real-time mutex, with a time boundary.
  123. *
  124. * The time boundary is an absolute time in ticks.
  125. *
  126. * If successful, the mutex is locked, otherwise an error is returned.
  127. * A mutex can only be locked once.
  128. *
  129. * This function may sleep.
  130. */
  131. static inline int
  132. rtmutex_timedlock (struct rtmutex *rtmutex, uint64_t ticks)
  133. {
  134. uintptr_t prev_owner = rtmutex_lock_fast (rtmutex);
  135. return (prev_owner ? rtmutex_timedlock_slow (rtmutex, ticks) : 0);
  136. }
  137. /*
  138. * Unlock a real-time mutex.
  139. *
  140. * The mutex must be locked, and must have been locked by the calling
  141. * thread.
  142. */
  143. static inline void
  144. rtmutex_unlock (struct rtmutex *rtmutex)
  145. {
  146. uintptr_t prev_owner = rtmutex_unlock_fast (rtmutex);
  147. if (unlikely (prev_owner & RTMUTEX_CONTENDED))
  148. rtmutex_unlock_slow (rtmutex);
  149. }
  150. #endif