/*
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define arch_spin_is_locked(x)	((x)->lock <= 0)

/*
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
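
/*
 * All of the lock loops below use the SH-4A LL/SC pair: movli.l loads
 * the lock word and opens a reservation, and movco.l stores back only
 * if the reservation is still held, setting the T bit on success, so
 * "bf 1b" (branch if T clear) retries whenever another CPU got in
 * between.  The "z" constraint pins the temporary to r0, the only
 * register movli.l/movco.l operate on; the "t" clobber covers the T bit.
 */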

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1: \n\t"
		"movli.l @%2, %0 ! arch_spin_lock \n\t"
		"mov %0, %1 \n\t"
		"mov #0, %0 \n\t"
		"movco.l %0, @%2 \n\t"
		"bf 1b \n\t"
		"cmp/pl %1 \n\t"
		"bf 1b \n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
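
/*
 * Only the lock owner stores here, so releasing the lock is a plain
 * aligned store of 1 (the "free" value); the "memory" clobber keeps the
 * compiler from moving critical-section accesses past the unlock.
 */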
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov #1, %0 ! arch_spin_unlock \n\t"
		"mov.l %0, @%1 \n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
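
/*
 * Returns the previous lock value: non-zero means the lock was free and
 * now belongs to the caller, zero means it was already held (a held
 * lock reads as 0 in this file's convention, so the unconditional store
 * of 0 is harmless).  The trailing synco is the SH-4A synchronization
 * instruction and orders the lock update before later memory accesses.
 */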
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1: \n\t"
		"movli.l @%2, %0 ! arch_spin_trylock \n\t"
		"mov %0, %1 \n\t"
		"mov #0, %0 \n\t"
		"movco.l %0, @%2 \n\t"
		"bf 1b \n\t"
		"synco \n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
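
/*
 * The rwlock counter starts at RW_LOCK_BIAS.  Each reader atomically
 * decrements it by one and may proceed while it is still positive; a
 * writer waits until the full bias is present (no readers, no writer)
 * and then subtracts RW_LOCK_BIAS, dropping the counter to zero.
 * Unlocking reverses the step: readers add one back, and the writer
 * restores RW_LOCK_BIAS with a plain store.
 */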

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1: \n\t"
		"movli.l @%1, %0 ! arch_read_lock \n\t"
		"cmp/pl %0 \n\t"
		"bf 1b \n\t"
		"add #-1, %0 \n\t"
		"movco.l %0, @%1 \n\t"
		"bf 1b \n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1: \n\t"
		"movli.l @%1, %0 ! arch_read_unlock \n\t"
		"add #1, %0 \n\t"
		"movco.l %0, @%1 \n\t"
		"bf 1b \n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1: \n\t"
		"movli.l @%1, %0 ! arch_write_lock \n\t"
		"cmp/hs %2, %0 \n\t"
		"bf 1b \n\t"
		"sub %2, %0 \n\t"
		"movco.l %0, @%1 \n\t"
		"bf 1b \n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l %1, @%0 ! arch_write_unlock \n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}
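
/*
 * One shot at the reader path: if the counter is not positive, the
 * branch to 2f abandons the reservation and the old value is reported;
 * otherwise the decrement is attempted and retried only if movco.l
 * fails.  Returns non-zero when the read lock was taken.
 */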
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1: \n\t"
		"movli.l @%2, %0 ! arch_read_trylock \n\t"
		"mov %0, %1 \n\t"
		"cmp/pl %0 \n\t"
		"bf 2f \n\t"
		"add #-1, %0 \n\t"
		"movco.l %0, @%2 \n\t"
		"bf 1b \n\t"
		"2: \n\t"
		"synco \n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}
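
/*
 * Same idea for the writer: RW_LOCK_BIAS is subtracted only when the
 * full bias is present.  When it is not, the branch to 2f stores the
 * counter back unchanged, which is harmless, and comparing the old
 * value against RW_LOCK_BIAS makes the function return zero.
 */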
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1: \n\t"
		"movli.l @%2, %0 ! arch_write_trylock \n\t"
		"mov %0, %1 \n\t"
		"cmp/hs %3, %0 \n\t"
		"bf 2f \n\t"
		"sub %3, %0 \n\t"
		"2: \n\t"
		"movco.l %0, @%2 \n\t"
		"bf 1b \n\t"
		"synco \n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */