spinlock.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

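/*
 * Acquire: atomically swap LOCKED into @slock with the EX instruction and
 * retry until the value swapped out was UNLOCKED, i.e. no other CPU was
 * holding the lock.
 */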
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; we only need the one
	 * after taking the lock to provide the ACQUIRE semantics.
	 * However, doing the "right" thing was regressing hackbench,
	 * so this is kept for now, pending further investigation.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (tmp)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus the full all-all barrier is needed here.
	 */
	smp_mb();
}

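/*
 * Single EX attempt: swap LOCKED into @slock and report success (1) only
 * if the value swapped out was UNLOCKED, i.e. the lock was free.
 */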
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}

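/* Release the lock by exchanging UNLOCKED back into @slock */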
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option.
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock() above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 *
 * Unfair locking: writers can be starved indefinitely by readers.
 */

/* Would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->counter > 0)

/* Would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

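/*
 * @counter encoding: __ARCH_RW_LOCK_UNLOCKED__ when the lock is free, 0 when
 * a writer holds it, and any value in between is the number of additional
 * readers that can still be admitted.
 */
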
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A zero count means a writer holds the lock exclusively: deny the
	 * reader. Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}

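/* Spin until a read slot becomes available (@counter > 0) */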
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

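/* Spin until the rwlock is completely free, then claim it for the writer */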
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

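/* Reader done: return one read slot to @counter, under @lock_mutex */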
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}

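/* Writer done: restore @counter to the fully unlocked value */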
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */