/*
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, VAL > 0);
}
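
/*
 * Everything below is built on SH-4A's load-locked/store-conditional pair:
 * movli.l loads the lock word and arms the LDST link, and movco.l stores
 * only if the link is still intact, reporting success in the T bit, so
 * "bf 1b" retries whenever another CPU touched the word in between.  The
 * "=&z" constraint keeps the temporary in r0, which both instructions
 * require.
 */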

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
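
/*
 * arch_spin_lock(): snapshot the old lock value, store 0 ("locked") with
 * the store-conditional, retry if the store was interfered with, and then
 * retry the whole sequence if the snapshotted value was not positive,
 * i.e. the lock was already held.
 */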
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}
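
/*
 * These arch_* helpers are not called directly; they sit underneath the
 * generic API from <linux/spinlock.h>, which lands in arch_spin_lock()
 * and friends on SMP builds.  A minimal, illustrative sketch (the
 * stats_lock/stats names are made up for the example):
 *
 *	static DEFINE_SPINLOCK(stats_lock);
 *	static unsigned long stats;
 *
 *	void bump_stats(void)
 *	{
 *		spin_lock(&stats_lock);
 *		stats++;
 *		spin_unlock(&stats_lock);
 *	}
 */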

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers.  For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
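
/*
 * Illustrative sketch of the mixed pattern described above (table_lock and
 * the handlers are hypothetical): readers that may run in interrupt context
 * take the plain read lock, while the process-context writer disables IRQs
 * so it cannot be interrupted by a reader that would spin on the very lock
 * it is holding:
 *
 *	static DEFINE_RWLOCK(table_lock);
 *
 *	void handle_irq_lookup(void)
 *	{
 *		read_lock(&table_lock);
 *		... lookup ...
 *		read_unlock(&table_lock);
 *	}
 *
 *	void update_table(void)
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&table_lock, flags);
 *		... modify ...
 *		write_unlock_irqrestore(&table_lock, flags);
 *	}
 */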

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
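
/*
 * The rwlock word is a counter initialised to RW_LOCK_BIAS.  Each reader
 * decrements it by one, so any positive value means readers may still
 * enter; a writer grabs the lock by subtracting the whole bias, which can
 * only succeed while the counter is exactly RW_LOCK_BIAS (no readers, no
 * writer), and arch_write_unlock() simply stores the bias back.
 */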
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_LLSC_H */