/*
 * Copyright (c) 2012-2018 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Spin locks.
 *
 * Critical sections built with spin locks run with preemption disabled.
 *
 * This module provides fair spin locks which guarantee time-bounded lock
 * acquisition depending only on the number of contending processors.
 */
#ifndef KERN_SPINLOCK_H
#define KERN_SPINLOCK_H

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <kern/thread.h>
#include <machine/cpu.h>
/*
 * Uncontended lock values.
 *
 * Any other value implies a contended lock.
 */
#define SPINLOCK_UNLOCKED   0x0
#define SPINLOCK_LOCKED     0x1
#ifdef SPINLOCK_TRACK_OWNER

static inline void
spinlock_own (struct spinlock *lock)
{
  assert (!lock->owner);
  lock->owner = thread_self ();
}

static inline void
spinlock_disown (struct spinlock *lock)
{
  assert (lock->owner == thread_self ());
  lock->owner = NULL;
}

#else
#define spinlock_own(lock)
#define spinlock_disown(lock)
#endif
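
/*
 * Fast path: a single acquire-ordered compare-and-swap from
 * SPINLOCK_UNLOCKED to SPINLOCK_LOCKED. Returns EBUSY on contention,
 * in which case callers fall back to spinlock_lock_slow, which
 * implements the fair, time-bounded acquisition described above.
 */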
static inline int
spinlock_lock_fast (struct spinlock *lock)
{
  uint32_t prev = atomic_cas_acq (&lock->value, SPINLOCK_UNLOCKED,
                                  SPINLOCK_LOCKED);

  if (unlikely (prev != SPINLOCK_UNLOCKED))
    return (EBUSY);

  spinlock_own (lock);
  return (0);
}

void spinlock_lock_slow (struct spinlock *lock);

static inline void
spinlock_lock_common (struct spinlock *lock)
{
  int error = spinlock_lock_fast (lock);

  if (unlikely (error))
    spinlock_lock_slow (lock);
}
static inline void
spinlock_unlock_common (struct spinlock *lock)
{
  spinlock_disown (lock);
  atomic_and_rel (&lock->value, ~SPINLOCK_LOCKED);
}
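
/*
 * Return true if the lock is currently held.
 *
 * The load is relaxed, so the result is a snapshot that may already be
 * stale when the caller uses it; this is primarily useful in assertions.
 */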
static inline bool
spinlock_locked (const struct spinlock *lock)
{
  return (atomic_load_rlx (&lock->value) != SPINLOCK_UNLOCKED);
}
#ifdef SPINLOCK_TRACK_OWNER

static inline void
spinlock_transfer_owner (struct spinlock *lock, struct thread *owner)
{
  assert (lock->owner == thread_self ());
  lock->owner = owner;
}

#else
#define spinlock_transfer_owner(lock, owner)
#endif
// Initialize a spin lock.
void spinlock_init (struct spinlock *lock);
/*
 * Attempt to lock the given spin lock.
 *
 * Return 0 on success, EBUSY if the spin lock is already locked.
 *
 * Preemption is disabled on success.
 */
static inline int
spinlock_trylock (struct spinlock *lock)
{
  thread_preempt_disable ();
  int error = spinlock_lock_fast (lock);

  if (unlikely (error))
    thread_preempt_enable ();

  return (error);
}
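
/*
 * Usage sketch (illustrative, not part of this header): opportunistically
 * take a lock and fall back to deferred work when it is busy. The stats
 * structure and defer_update() are hypothetical:
 *
 *   if (spinlock_trylock (&stats->lock) == 0)
 *     {
 *       stats->counter += delta;
 *       spinlock_unlock (&stats->lock);
 *     }
 *   else
 *     defer_update (stats, delta);
 */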
/*
 * Lock a spin lock.
 *
 * If the spin lock is already locked, the calling thread spins until the
 * spin lock is unlocked.
 *
 * A spin lock can only be locked once; recursive locking is not supported.
 *
 * This function disables preemption.
 */
static inline void
spinlock_lock (struct spinlock *lock)
{
  thread_preempt_disable ();
  spinlock_lock_common (lock);
}
/*
 * Unlock a spin lock.
 *
 * The spin lock must be locked, and must have been locked on the same
 * processor it is unlocked on.
 *
 * This function may reenable preemption.
 */
static inline void
spinlock_unlock (struct spinlock *lock)
{
  spinlock_unlock_common (lock);
  thread_preempt_enable ();
}
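
/*
 * Usage sketch (illustrative, not part of this header): a plain critical
 * section. The queue structure and its fields are hypothetical:
 *
 *   spinlock_lock (&queue->lock);
 *   list_insert_tail (&queue->items, &item->node);
 *   spinlock_unlock (&queue->lock);
 *
 * Preemption stays disabled for the whole section, so the body must be
 * short and must never block.
 */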
/*
 * Versions of the spinlock functions that also disable interrupts during
 * critical sections.
 */
/*
 * Attempt to lock the given spin lock.
 *
 * Return 0 on success, EBUSY if the spin lock is already locked.
 *
 * Preemption and interrupts are disabled on success, in which case the
 * flags passed by the caller are filled with the previous value of the
 * CPU flags.
 */
static inline int
spinlock_trylock_intr_save (struct spinlock *lock, cpu_flags_t *flags)
{
  thread_preempt_disable_intr_save (flags);
  int error = spinlock_lock_fast (lock);

  if (unlikely (error))
    thread_preempt_enable_intr_restore (*flags);

  return (error);
}
/*
 * Lock a spin lock.
 *
 * If the spin lock is already locked, the calling thread spins until the
 * spin lock is unlocked.
 *
 * A spin lock can only be locked once; recursive locking is not supported.
 *
 * This function disables preemption and interrupts. The flags passed by
 * the caller are filled with the previous value of the CPU flags.
 */
static inline void
spinlock_lock_intr_save (struct spinlock *lock, cpu_flags_t *flags)
{
  thread_preempt_disable_intr_save (flags);
  spinlock_lock_common (lock);
}
/*
 * Unlock a spin lock.
 *
 * The spin lock must be locked, and must have been locked on the same
 * processor it is unlocked on.
 *
 * This function may reenable preemption and interrupts, using the given
 * flags which must have been obtained with a lock or trylock operation.
 */
static inline void
spinlock_unlock_intr_restore (struct spinlock *lock, cpu_flags_t flags)
{
  spinlock_unlock_common (lock);
  thread_preempt_enable_intr_restore (flags);
}
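
/*
 * Usage sketch (illustrative, not part of this header): a critical section
 * shared with interrupt context, e.g. between a driver and its interrupt
 * handler. The device structure is hypothetical:
 *
 *   cpu_flags_t flags;
 *
 *   spinlock_lock_intr_save (&dev->lock, &flags);
 *   dev->pending |= mask;
 *   spinlock_unlock_intr_restore (&dev->lock, flags);
 *
 * Use these versions whenever the lock may also be taken from an interrupt
 * handler, since acquiring it with interrupts enabled could otherwise
 * deadlock against that handler on the same processor.
 */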
// Spinlock guards.

struct spinlock_guard
{
  struct spinlock *spinlock;
  cpu_flags_t flags;
  bool saved_flags;
};

static inline void
spinlock_guard_lock (struct spinlock_guard *guard)
{
  if (guard->saved_flags)
    spinlock_lock_intr_save (guard->spinlock, &guard->flags);
  else
    spinlock_lock (guard->spinlock);
}

static inline struct spinlock_guard
spinlock_guard_make (struct spinlock *spinlock, bool save_flags)
{
  struct spinlock_guard ret = { .spinlock = spinlock };
  ret.saved_flags = save_flags;
  spinlock_guard_lock (&ret);
  return (ret);
}

static inline void
spinlock_guard_fini (void *ptr)
{
  struct spinlock_guard *guard = ptr;

  if (guard->saved_flags)
    spinlock_unlock_intr_restore (guard->spinlock, guard->flags);
  else
    spinlock_unlock (guard->spinlock);
}

#define SPINLOCK_GUARD_MAKE(spinlock, save_flags)   \
  CLEANUP (spinlock_guard_fini) _Auto __unused UNIQ (sg) =   \
    spinlock_guard_make ((spinlock), (save_flags))

#define SPINLOCK_INTR_GUARD(spinlock)   SPINLOCK_GUARD_MAKE ((spinlock), true)

#define SPINLOCK_GUARD(spinlock)   SPINLOCK_GUARD_MAKE ((spinlock), false)
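
/*
 * Usage sketch (illustrative, not part of this header): the guard macros
 * lock on declaration and rely on the CLEANUP macro to unlock when the
 * enclosing scope exits, so early returns cannot leak the lock. The
 * counter structure is hypothetical:
 *
 *   static void
 *   counter_add (struct counter *counter, uint64_t delta)
 *   {
 *     SPINLOCK_GUARD (&counter->lock);
 *     counter->value += delta;
 *   }
 *
 * Use SPINLOCK_INTR_GUARD instead when the critical section must also run
 * with interrupts disabled.
 */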
/*
 * This init operation provides:
 *  - uncontended spinlock locking
 *
 * Contended locking may only occur after starting APs (application
 * processors).
 */
INIT_OP_DECLARE (spinlock_setup);

#endif // KERN_SPINLOCK_H