spinlock.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1: llock %[val], [%[slock]] \n"
	" breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
	" scond %[LOCKED], [%[slock]] \n" /* acquire */
	" bnz 1b \n"
	" \n"
	: [val] "=&r" (val)
	: [slock] "r" (&(lock->slock)),
	  [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}
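
/*
 * A rough C-level sketch of the llock/scond retry loop used above, assuming
 * a hypothetical store_conditional() helper standing in for "scond", which
 * fails when the reservation set up by "llock" has been lost:
 *
 *	do {
 *		do {
 *			val = lock->slock;			(llock)
 *		} while (val == __ARCH_SPIN_LOCK_LOCKED__);	(breq ... 1b)
 *	} while (!store_conditional(&lock->slock,		(scond; bnz 1b)
 *				    __ARCH_SPIN_LOCK_LOCKED__));
 */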

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1: llock %[val], [%[slock]] \n"
	" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
	" scond %[LOCKED], [%[slock]] \n" /* acquire */
	" bnz 1b \n"
	" mov %[got_it], 1 \n"
	"4: \n"
	" \n"
	: [val] "=&r" (val),
	  [got_it] "+&r" (got_it)
	: [slock] "r" (&(lock->slock)),
	  [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}
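
/*
 * Usage sketch for the trylock return convention above (hypothetical
 * caller, shown only to illustrate the "1 = lock taken" contract):
 *
 *	if (arch_spin_trylock(&lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&lock);
 *	} else {
 *		... lock was busy: back off or retry later ...
 *	}
 */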

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
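
/*
 * Sketch of the counter encoding the routines below rely on (the exact
 * unlocked value is defined in asm/spinlock_types.h):
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__		lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__		held by reader(s)
 *	counter == 0					held by a writer
 *
 * A reader takes the lock by decrementing the counter and releases it by
 * incrementing; a writer moves the counter from UNLOCKED straight to 0.
 */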

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */
	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
	" sub %[val], %[val], 1 \n" /* reader lock */
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"
	" \n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter)),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
	" sub %[val], %[val], 1 \n" /* counter-- */
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n" /* retry if collided with someone */
	" mov %[got_it], 1 \n"
	" \n"
	"4: ; --- done --- \n"
	: [val] "=&r" (val),
	  [got_it] "+&r" (got_it)
	: [rwlock] "r" (&(rw->counter)),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */
	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
	" mov %[val], %[WR_LOCKED] \n"
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"
	" \n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter)),
	  [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
	" mov %[val], %[WR_LOCKED] \n"
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n" /* retry if collided with someone */
	" mov %[got_it], 1 \n"
	" \n"
	"4: ; --- done --- \n"
	: [val] "=&r" (val),
	  [got_it] "+&r" (got_it)
	: [rwlock] "r" (&(rw->counter)),
	  [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" add %[val], %[val], 1 \n"
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"
	" \n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1: ex %0, [%1] \n"
	" breq %0, %2, 1b \n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}
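
/*
 * Rough sketch of the EX-based locking loop above, with a hypothetical
 * atomic_exchange() helper standing in for the "ex" instruction (which
 * atomically swaps a register with a memory location): keep writing
 * LOCKED into the lock word until the old value read back was UNLOCKED.
 *
 *	do {
 *		val = atomic_exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 */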

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1: ex %0, [%1] \n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	" ex %0, [%1] \n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
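
/*
 * For orientation, the !LLSC arch_rwlock_t is assumed to pair the reader
 * counter with a plain spinlock used as the serializing mutex (field names
 * as used below; the authoritative definition lives in
 * asm/spinlock_types.h):
 *
 *	typedef struct {
 *		volatile unsigned int	counter;
 *		arch_spinlock_t		lock_mutex;
 *	} arch_rwlock_t;
 */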

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */