/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                       \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __raw_spin_lock_init((lock), #lock, &__key);    \
} while (0)

#else
# define raw_spin_lock_init(lock)                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
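
/*
 * Illustrative sketch (not part of this header; structure and field names
 * are invented): a raw spinlock embedded in a structure is typically
 * initialized at runtime with raw_spin_lock_init(), so that lockdep (when
 * enabled) gets a distinct lock class per initialization site:
 *
 *      struct foo_stats {
 *              raw_spinlock_t  lock;
 *              u64             count;
 *      };
 *
 *      static void foo_stats_init(struct foo_stats *s)
 *      {
 *              raw_spin_lock_init(&s->lock);
 *              s->count = 0;
 *      }
 *
 * Statically allocated locks can instead use DEFINE_RAW_SPINLOCK(name)
 * from linux/spinlock_types.h.
 */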

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                            CPU1
 *
 *        WRITE_ONCE(X, 1);               WRITE_ONCE(Y, 1);
 *        spin_lock(S);                   smp_mb();
 *        smp_mb__after_spinlock();       r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                  CPU1                          CPU2
 *
 *        spin_lock(S);         spin_lock(S);                 r1 = READ_ONCE(Y);
 *        WRITE_ONCE(X, 1);     smp_mb__after_spinlock();     smp_rmb();
 *        spin_unlock(S);       r0 = READ_ONCE(X);            r2 = READ_ONCE(X);
 *                              WRITE_ONCE(Y, 1);
 *                              spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        do { } while (0)
#endif
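
/*
 * Illustrative sketch (not part of this header; names are invented): the
 * typical use is a lock acquisition that must also act as a full barrier
 * ordering a program-order earlier store against loads performed inside
 * the critical section, as in the wakeup paths referenced above:
 *
 *      WRITE_ONCE(x->flag, 1);                 // earlier store
 *      spin_lock(&q->lock);
 *      smp_mb__after_spinlock();               // upgrade ACQUIRE to a full barrier
 *      if (READ_ONCE(q->need_wakeup))          // later load, now fully ordered
 *              do_wakeup(q);
 *      spin_unlock(&q->lock);
 *
 * The real-world users are the callers documented in __schedule() and
 * try_to_wake_up().
 */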

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif
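
/*
 * Illustrative sketch (not part of this header; names are invented): the
 * _nested variants only change the lockdep annotation. Taking two locks of
 * the same lock class, e.g. a parent and a child object, would otherwise
 * be reported as a potential deadlock:
 *
 *      raw_spin_lock(&parent->lock);
 *      raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *      ...
 *      raw_spin_unlock(&child->lock);
 *      raw_spin_unlock(&parent->lock);
 *
 * SINGLE_DEPTH_NESTING is defined in linux/lockdep.h.
 */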

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
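
/*
 * Illustrative sketch (not part of this header; names are invented): the
 * _irqsave variants are used when the lock may also be taken from hard IRQ
 * context, so local interrupts are disabled and their previous state is
 * restored on unlock. The trylock form backs off instead of spinning:
 *
 *      unsigned long flags;
 *
 *      raw_spin_lock_irqsave(&dev->fifo_lock, flags);
 *      push_to_fifo(dev, pkt);
 *      raw_spin_unlock_irqrestore(&dev->fifo_lock, flags);
 *
 *      if (raw_spin_trylock_irqsave(&dev->fifo_lock, flags)) {
 *              drain_fifo(dev);
 *              raw_spin_unlock_irqrestore(&dev->fifo_lock, flags);
 *      }
 *
 * Note that 'flags' is an unsigned long passed by name (these are macros),
 * which is why the definitions above typecheck() it.
 */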

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})
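
/*
 * Illustrative sketch (not part of this header; names are invented):
 * typical use of the spinlock_t wrappers above to protect data shared
 * between process context and an interrupt handler:
 *
 *      static DEFINE_SPINLOCK(counter_lock);
 *      static unsigned long counter;
 *
 *      // process context: must disable local interrupts while holding the lock
 *      unsigned long flags;
 *      spin_lock_irqsave(&counter_lock, flags);
 *      counter++;
 *      spin_unlock_irqrestore(&counter_lock, flags);
 *
 *      // hard IRQ context: interrupts are already disabled locally
 *      spin_lock(&counter_lock);
 *      counter++;
 *      spin_unlock(&counter_lock);
 *
 * DEFINE_SPINLOCK() comes from linux/spinlock_types.h; dynamically
 * allocated locks use spin_lock_init() instead.
 */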

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}
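
/*
 * Illustrative sketch (not part of this header; names are invented): given
 * the caveats above, spin_is_locked() is best confined to sanity checks
 * rather than synchronization decisions:
 *
 *      WARN_ON_ONCE(!spin_is_locked(&dev->lock));      // debug-only check
 *
 * For asserting "the current context holds this lock", assert_spin_locked()
 * below or lockdep_assert_held() from linux/lockdep.h are generally the
 * better fit.
 */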

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
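
/*
 * Illustrative sketch (not part of this header; names are invented): the
 * classic use is releasing an object whose last reference must be dropped
 * under the lock protecting the list it lives on, without taking that lock
 * on every put:
 *
 *      void obj_put(struct obj *obj)
 *      {
 *              if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *                      list_del(&obj->node);
 *                      spin_unlock(&obj_list_lock);
 *                      kfree(obj);
 *              }
 *      }
 *
 * The lock is acquired only when the counter actually reaches zero.
 */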

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);
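
/*
 * Illustrative sketch (not part of this header; names are invented): bucket
 * spinlocks provide an array of locks for a hash table, sized by max_size
 * (optionally scaled per possible CPU via cpu_mult), with lock_mask
 * returned for indexing:
 *
 *      spinlock_t *bucket_locks;
 *      unsigned int lock_mask;
 *
 *      if (alloc_bucket_spinlocks(&bucket_locks, &lock_mask, 1024, 0,
 *                                 GFP_KERNEL))
 *              return -ENOMEM;
 *
 *      spin_lock(&bucket_locks[hash & lock_mask]);
 *      ...
 *      spin_unlock(&bucket_locks[hash & lock_mask]);
 *
 *      free_bucket_spinlocks(bucket_locks);
 */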

#endif /* __LINUX_SPINLOCK_H */