spinlock_64.h

/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code.  They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants.  The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */

#define arch_spin_is_locked(lp)         ((lp)->lock != 0)

#define arch_spin_unlock_wait(lp)       \
        do {    rmb();                  \
        } while((lp)->lock)
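
/* The lock byte is zero when free.  ldstub atomically fetches the old
 * byte and writes 0xff, so a zero result means we own the lock; on
 * contention we drop into the out-of-line loop in .subsection 2 and
 * spin with plain loads until the byte reads zero, then branch back
 * and retry the atomic ldstub.
 */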
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldstub          [%1], %0\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldub            [%1], %0\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 1b\n"
"       .previous"
        : "=&r" (tmp)
        : "r" (lock)
        : "memory");
}
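
/* Single ldstub attempt, no spinning: returns 1 if the old byte was
 * zero (lock acquired), 0 if the lock was already held.
 */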
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long result;

        __asm__ __volatile__(
"       ldstub          [%1], %0\n"
        : "=r" (result)
        : "r" (lock)
        : "memory");

        return (result == 0UL);
}
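
/* Release is a single byte store of %g0 (the always-zero register).
 * The "memory" clobber is a compiler barrier; ordering against the
 * critical section's stores is presumably left to the CPU's memory
 * model, since no explicit membar is issued here.
 */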
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __asm__ __volatile__(
"       stb             %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
        : "memory");
}
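
/* Same ldstub fast path as arch_spin_lock(), but while spinning the
 * %pil value saved in 'flags' is written back so interrupts can be
 * taken again, and the raised %pil is restored (in the branch delay
 * slot) just before retrying the ldstub.
 */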
static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"1:     ldstub          [%2], %0\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     rdpr            %%pil, %1\n"
"       wrpr            %3, %%pil\n"
"3:     ldub            [%2], %0\n"
"       brnz,pt         %0, 3b\n"
"        nop\n"
"       ba,pt           %%xcc, 1b\n"
"        wrpr           %1, %%pil\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r"(lock), "r"(flags)
        : "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
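
/* The rwlock word is a signed 32-bit counter: readers increment it,
 * a writer owns the lock by setting bit 31 (0x80000000), so a
 * negative value means a writer is active.
 */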
static void inline arch_read_lock(arch_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__ (
"1:     ldsw            [%2], %0\n"
"       brlz,pn         %0, 2f\n"
"4:      add            %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldsw            [%2], %0\n"
"       brlz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
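
/* Try to bump the reader count with cas.  If a writer is seen (word
 * negative) the annulled branch loads 0 and we fail immediately;
 * otherwise the cas is retried until it succeeds and 1 is returned.
 */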
static int inline arch_read_trylock(arch_rwlock_t *lock)
{
        int tmp1, tmp2;

        __asm__ __volatile__ (
"1:     ldsw            [%2], %0\n"
"       brlz,a,pn       %0, 2f\n"
"        mov            0, %0\n"
"       add             %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        mov            1, %0\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");

        return tmp1;
}
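
/* Drop a reader reference: load, decrement and cas, retried until no
 * other CPU changed the counter between the load and the cas.
 */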
static void inline arch_read_unlock(arch_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"1:     lduw            [%2], %0\n"
"       sub             %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%xcc, 1b\n"
"        nop"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
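
/* A writer waits in the .subsection 2 loop for the counter to reach
 * zero (no readers, no writer), then uses cas to install the
 * 0x80000000 writer bit, retrying if the word changed underneath.
 */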
static void inline arch_write_lock(arch_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"1:     lduw            [%2], %0\n"
"       brnz,pn         %0, 2f\n"
"4:      or             %0, %3, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     lduw            [%2], %0\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (mask)
        : "memory");
}
static void inline arch_write_unlock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
"       stw             %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
        : "memory");
}
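
/* Writer trylock: give up and return 0 as soon as a non-zero word is
 * seen; otherwise cas in the writer bit (re-reading if the cas races
 * with another CPU) and return 1.
 */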
static int inline arch_write_trylock(arch_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2, result;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"       mov             0, %2\n"
"1:     lduw            [%3], %0\n"
"       brnz,pn         %0, 2f\n"
"        or             %0, %4, %1\n"
"       cas             [%3], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       mov             1, %2\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
        : "r" (lock), "r" (mask)
        : "memory");

        return result;
}
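
/* The rwlock _flags variants do not play the %pil game that
 * arch_spin_lock_flags() does; they simply alias the plain lock
 * routines.  A lock is read-lockable while the writer bit is clear,
 * and write-lockable only when the whole word is zero.
 */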
#define arch_read_lock_flags(p, f)      arch_read_lock(p)
#define arch_write_lock_flags(p, f)     arch_write_lock(p)

#define arch_read_can_lock(rw)          (!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw)         (!(rw)->lock)

#define arch_spin_relax(lock)           cpu_relax()
#define arch_read_relax(lock)           cpu_relax()
#define arch_write_relax(lock)          cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */