/*
 * include/asm-xtensa/spinlock.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * spinlock
 *
 * There is at most one owner of a spinlock.  Unlike rwlocks (see below),
 * there are no different types of spinlock owners.
 *
 * When trying to obtain a spinlock, the function "spins" forever, or busy-
 * waits, until the lock is obtained.  When spinning, presumably some other
 * owner will soon give up the spinlock, making it available to others.  Use
 * the trylock functions to avoid spinning forever.
 *
 * possible values:
 *
 *   0    nobody owns the spinlock
 *   1    somebody owns the spinlock
 */
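
/*
 * Illustrative usage (a sketch only, not part of this header; kernel code
 * takes these locks through the generic <linux/spinlock.h> wrappers rather
 * than calling the arch_ primitives directly; __ARCH_SPIN_LOCK_UNLOCKED
 * comes from asm/spinlock_types.h):
 *
 *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&l);	.. spins until slock == 0, then sets it to 1
 *	.. critical section ..
 *	arch_spin_unlock(&l);	.. stores 0, releasing the lock
 */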

#define arch_spin_is_locked(x) ((x)->slock != 0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * S32C1I stores %0 to *%1 only if *%1 equals SCOMPARE1, and
	 * always returns the old memory value in %0: expect 0
	 * (unlocked), try to store 1, and retry until the old value
	 * seen was 0.
	 */
	__asm__ __volatile__(
			"	movi	%0, 0\n"
			"	wsr	%0, scompare1\n"
			"1:	movi	%0, 1\n"
			"	s32c1i	%0, %1, 0\n"
			"	bnez	%0, 1b\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"	movi	%0, 0\n"
			"	wsr	%0, scompare1\n"
			"	movi	%0, 1\n"
			"	s32c1i	%0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}
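
/*
 * Illustrative trylock pattern (a sketch only; real callers use the
 * generic spin_trylock() wrapper):
 *
 *	if (arch_spin_trylock(&l)) {
 *		.. lock acquired without spinning ..
 *		arch_spin_unlock(&l);
 *	}
 */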

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * S32RI is a store with release semantics: all prior memory
	 * operations complete before the 0 becomes visible.
	 */
	__asm__ __volatile__(
			"	movi	%0, 0\n"
			"	s32ri	%0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/*
 * rwlock
 *
 * Read-write locks are really a more flexible kind of spinlock.  They
 * allow multiple readers but only one writer.  Write ownership is
 * exclusive (i.e., all other readers and writers are blocked from
 * ownership while there is a write owner).  These rwlocks are unfair to
 * writers: writers can be starved for an indefinite time by readers.
 *
 * possible values:
 *
 *   0           nobody owns the rwlock
 *   >0          one or more readers own the rwlock
 *               (the positive value is the actual number of readers)
 *   0x80000000  one writer owns the rwlock, no other writers, no readers
 */
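
/*
 * Illustrative usage (a sketch only; kernel code uses the generic
 * read_lock()/write_lock() wrappers, never these primitives directly,
 * and the zero-initialized lock word shown here is an assumption about
 * the unlocked state described above):
 *
 *	arch_rwlock_t rw = { .lock = 0 };
 *
 *	arch_read_lock(&rw);	.. lock goes from N to N+1 readers
 *	.. readers may run concurrently ..
 *	arch_read_unlock(&rw);
 *
 *	arch_write_lock(&rw);	.. lock goes from 0 to 0x80000000
 *	.. exclusive access ..
 *	arch_write_unlock(&rw);
 */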

#define arch_write_can_lock(x) ((x)->lock == 0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/*
	 * Expect 0 (no readers, no writer) and try to install
	 * 0x80000000 (1 << 31), the write-owned value; retry until the
	 * compare-and-store succeeds.
	 */
	__asm__ __volatile__(
			"	movi	%0, 0\n"
			"	wsr	%0, scompare1\n"
			"1:	movi	%0, 1\n"
			"	slli	%0, %0, 31\n"
			"	s32c1i	%0, %1, 0\n"
			"	bnez	%0, 1b\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"	movi	%0, 0\n"
			"	wsr	%0, scompare1\n"
			"	movi	%0, 1\n"
			"	slli	%0, %0, 31\n"
			"	s32c1i	%0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"	movi	%0, 0\n"
			"	s32ri	%0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	/*
	 * Spin while the value is negative (a writer holds bit 31),
	 * then try to bump the reader count by one with S32C1I; retry
	 * if the lock word changed underneath us.
	 */
	__asm__ __volatile__(
			"1:	l32i	%1, %2, 0\n"
			"	bltz	%1, 1b\n"
			"	wsr	%1, scompare1\n"
			"	addi	%0, %1, 1\n"
			"	s32c1i	%0, %2, 0\n"
			"	bne	%0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	/*
	 * If the incremented value is negative, a writer holds the lock
	 * (0x80000000 + 1 is still negative) and we fail immediately.
	 * Otherwise S32C1I leaves the old memory value in %0, so
	 * %0 - %1 is zero exactly when the compare-and-store succeeded.
	 */
	__asm__ __volatile__(
			"	l32i	%1, %2, 0\n"
			"	addi	%0, %1, 1\n"
			"	bltz	%0, 1f\n"
			"	wsr	%1, scompare1\n"
			"	s32c1i	%0, %2, 0\n"
			"	sub	%0, %0, %1\n"
			"1:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return result == 0;
}
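
/*
 * Illustrative read_trylock pattern (a sketch only; real callers use the
 * generic read_trylock() wrapper):
 *
 *	if (arch_read_trylock(&rw)) {
 *		.. reader count bumped, no writer present ..
 *		arch_read_unlock(&rw);
 *	}
 */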

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	/* Atomically decrement the reader count, retrying on contention. */
	__asm__ __volatile__(
			"1:	l32i	%1, %2, 0\n"
			"	addi	%0, %1, -1\n"
			"	wsr	%1, scompare1\n"
			"	s32c1i	%0, %2, 0\n"
			"	bne	%0, %1, 1b\n"
			: "=&a" (tmp1), "=&a" (tmp2)
			: "a" (&rw->lock)
			: "memory");
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#endif	/* _XTENSA_SPINLOCK_H */