spinlock.h

/*
 * Copyright (c) 2012-2018 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Spin locks.
 *
 * Critical sections built with spin locks run with preemption disabled.
 *
 * This module provides fair spin locks which guarantee time-bounded lock
 * acquisition depending only on the number of contending processors.
 */

#ifndef KERN_SPINLOCK_H
#define KERN_SPINLOCK_H

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <kern/thread.h>
#include <machine/cpu.h>

/*
 * Uncontended lock values.
 *
 * Any other value implies a contended lock.
 */
#define SPINLOCK_UNLOCKED   0x0
#define SPINLOCK_LOCKED     0x1

#ifdef SPINLOCK_TRACK_OWNER

static inline void
spinlock_own (struct spinlock *lock)
{
  assert (!lock->owner);
  lock->owner = thread_self ();
}

static inline void
spinlock_disown (struct spinlock *lock)
{
  assert (lock->owner == thread_self ());
  lock->owner = NULL;
}

#else
#define spinlock_own(lock)
#define spinlock_disown(lock)
#endif

static inline int
spinlock_lock_fast (struct spinlock *lock)
{
  uint32_t prev = atomic_cas_acq (&lock->value, SPINLOCK_UNLOCKED,
                                  SPINLOCK_LOCKED);

  if (unlikely (prev != SPINLOCK_UNLOCKED))
    return (EBUSY);

  spinlock_own (lock);
  return (0);
}
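
/*
 * Slow path, used when the fast path above fails because the lock is
 * contended. The fair, time-bounded acquisition described at the top of
 * this file is presumably implemented here, out of line in the
 * corresponding implementation file.
 */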
void spinlock_lock_slow (struct spinlock *lock);

static inline void
spinlock_lock_common (struct spinlock *lock)
{
  int error = spinlock_lock_fast (lock);

  if (unlikely (error))
    spinlock_lock_slow (lock);
}

static inline void
spinlock_unlock_common (struct spinlock *lock)
{
  spinlock_disown (lock);
  atomic_and_rel (&lock->value, ~SPINLOCK_LOCKED);
}

static inline bool
spinlock_locked (const struct spinlock *lock)
{
  uint32_t value = atomic_load_rlx (&lock->value);
  return (value != SPINLOCK_UNLOCKED);
}

#ifdef SPINLOCK_TRACK_OWNER
static inline void
spinlock_transfer_owner (struct spinlock *lock, struct thread *owner)
{
  assert (lock->owner == thread_self ());
  lock->owner = owner;
}
#else
#define spinlock_transfer_owner(lock, owner)
#endif

// Initialize a spin lock.
void spinlock_init (struct spinlock *lock);

/*
 * Attempt to lock the given spin lock.
 *
 * Return 0 on success, EBUSY if the spin lock is already locked.
 *
 * Preemption is disabled on success.
 */
static inline int
spinlock_trylock (struct spinlock *lock)
{
  thread_preempt_disable ();
  int error = spinlock_lock_fast (lock);

  if (unlikely (error))
    thread_preempt_enable ();

  return (error);
}
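
/*
 * Example (illustrative sketch): opportunistically update shared state,
 * skipping the work when the lock is contended instead of spinning. The
 * stats_lock and stats_update names are hypothetical, not part of this
 * interface.
 *
 *   if (spinlock_trylock (&stats_lock) == 0)
 *     {
 *       stats_update ();
 *       spinlock_unlock (&stats_lock);
 *     }
 */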

/*
 * Lock a spin lock.
 *
 * If the spin lock is already locked, the calling thread spins until the
 * spin lock is unlocked.
 *
 * A spin lock can only be locked once, i.e. spin locks aren't recursive.
 *
 * This function disables preemption.
 */
static inline void
spinlock_lock (struct spinlock *lock)
{
  thread_preempt_disable ();
  spinlock_lock_common (lock);
}

/*
 * Unlock a spin lock.
 *
 * The spin lock must be locked, and must have been locked on the same
 * processor it is unlocked on.
 *
 * This function may reenable preemption.
 */
static inline void
spinlock_unlock (struct spinlock *lock)
{
  spinlock_unlock_common (lock);
  thread_preempt_enable ();
}
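
/*
 * Example (illustrative sketch): serialize updates to a shared counter.
 * The counter_lock, counter and counter_inc names are hypothetical, not
 * part of this interface; counter_lock must have been initialized with
 * spinlock_init beforehand.
 *
 *   static struct spinlock counter_lock;
 *   static unsigned long counter;
 *
 *   static void
 *   counter_inc (void)
 *   {
 *     spinlock_lock (&counter_lock);
 *     counter++;
 *     spinlock_unlock (&counter_lock);
 *   }
 */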

/*
 * Versions of the spinlock functions that also disable interrupts during
 * critical sections.
 */

/*
 * Attempt to lock the given spin lock.
 *
 * Return 0 on success, EBUSY if the spin lock is already locked.
 *
 * Preemption and interrupts are disabled on success, in which case the
 * flags passed by the caller are filled with the previous value of the
 * CPU flags.
 */
static inline int
spinlock_trylock_intr_save (struct spinlock *lock, cpu_flags_t *flags)
{
  thread_preempt_disable_intr_save (flags);
  int error = spinlock_lock_fast (lock);

  if (unlikely (error))
    thread_preempt_enable_intr_restore (*flags);

  return (error);
}

/*
 * Lock a spin lock.
 *
 * If the spin lock is already locked, the calling thread spins until the
 * spin lock is unlocked.
 *
 * A spin lock can only be locked once, i.e. spin locks aren't recursive.
 *
 * This function disables preemption and interrupts. The flags passed by
 * the caller are filled with the previous value of the CPU flags.
 */
static inline void
spinlock_lock_intr_save (struct spinlock *lock, cpu_flags_t *flags)
{
  thread_preempt_disable_intr_save (flags);
  spinlock_lock_common (lock);
}

/*
 * Unlock a spin lock.
 *
 * The spin lock must be locked, and must have been locked on the same
 * processor it is unlocked on.
 *
 * This function may reenable preemption and interrupts, using the given
 * flags which must have been obtained with a lock or trylock operation.
 */
static inline void
spinlock_unlock_intr_restore (struct spinlock *lock, cpu_flags_t flags)
{
  spinlock_unlock_common (lock);
  thread_preempt_enable_intr_restore (flags);
}
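
/*
 * Example (illustrative sketch): a lock that may also be taken from
 * interrupt context must be acquired with interrupts disabled, otherwise
 * the holder could be interrupted and deadlock against itself. The
 * device_lock name is hypothetical.
 *
 *   cpu_flags_t flags;
 *
 *   spinlock_lock_intr_save (&device_lock, &flags);
 *   ... critical section, safe against local interrupts ...
 *   spinlock_unlock_intr_restore (&device_lock, flags);
 */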

// Spinlock guards.

struct spinlock_guard
{
  struct spinlock *spinlock;
  cpu_flags_t flags;
  bool saved_flags;
};

static inline struct spinlock_guard
spinlock_guard_make (struct spinlock *spinlock, bool save_flags)
{
  struct spinlock_guard ret =
    {
      .spinlock = spinlock,
      .saved_flags = save_flags
    };

  if (save_flags)
    spinlock_lock_intr_save (spinlock, &ret.flags);
  else
    spinlock_lock (spinlock);

  return (ret);
}

static inline void
spinlock_guard_fini (void *ptr)
{
  struct spinlock_guard *guard = ptr;

  if (guard->saved_flags)
    spinlock_unlock_intr_restore (guard->spinlock, guard->flags);
  else
    spinlock_unlock (guard->spinlock);
}

#define SPINLOCK_GUARD_IMPL(spinlock, save_flags)   \
  CLEANUP (spinlock_guard_fini) _Auto __unused UNIQ (sg) =   \
    spinlock_guard_make ((spinlock), (save_flags))

#define SPINLOCK_INTR_GUARD(spinlock)   SPINLOCK_GUARD_IMPL ((spinlock), true)
#define SPINLOCK_GUARD(spinlock)   SPINLOCK_GUARD_IMPL ((spinlock), false)
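
/*
 * Example (illustrative sketch): scope-based locking. The guard acquires
 * the lock when declared and automatically releases it when the enclosing
 * block exits, through the CLEANUP attribute. The list_lock and list_push
 * names are hypothetical.
 *
 *   static struct spinlock list_lock;
 *
 *   static void
 *   list_push (struct item *item)
 *   {
 *     SPINLOCK_GUARD (&list_lock);
 *     ... the lock is held until the end of this block ...
 *   }
 */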

/*
 * This init operation provides:
 *  - uncontended spinlock locking
 *
 * Contended locking may only occur after starting APs.
 */
INIT_OP_DECLARE (spinlock_setup);

#endif /* KERN_SPINLOCK_H */