spinlock.h

/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP.
 */

/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the lock value is still negative (a writer holds it), go back
 *   and try again
 * - an unsuccessful store means another CPU raced the update: go back
 *   and try again
 * - a successful store of the new, positive value means the read lock
 *   is acquired
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
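
/*
 * For illustration only (not compiled): a portable C11 sketch of the
 * load-locked/store-conditional loop above, assuming the same convention
 * of a signed counter where a negative value means write-locked. The
 * name read_lock_sketch is hypothetical, not part of this header.
 */
#if 0
#include <stdatomic.h>

static void read_lock_sketch(atomic_int *lock)
{
	int old;

	for (;;) {
		old = atomic_load_explicit(lock, memory_order_relaxed);
		if (old < 0)
			continue;	/* write-locked: keep spinning */
		/* the CAS fails if another CPU raced the update; retry */
		if (atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
				memory_order_acquire, memory_order_relaxed))
			break;
	}
}
#endif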

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	R6 = add(R6,#-1);\n"
		"	memw_locked(%0,P3) = R6\n"
		"	if !P3 jump 1b;\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

/* Returns 0 on failure, 1 on success. */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	{ %0 = P3 }\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

/* Write lock: acquire by storing -1 in the lock value while it is free (0). */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0)\n"
		"	{ P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
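
/*
 * For illustration only (not compiled): a C11 sketch of the write-lock
 * loop above, reusing the conventions of the earlier read-lock sketch.
 * Acquisition is the 0 -> -1 transition; any nonzero value (readers or
 * another writer) forces a retry. write_lock_sketch is a hypothetical name.
 */
#if 0
static void write_lock_sketch(atomic_int *lock)
{
	int expected;

	do {
		expected = 0;	/* only a free lock (0) can be claimed */
	} while (!atomic_compare_exchange_weak_explicit(lock, &expected, -1,
			memory_order_acquire, memory_order_relaxed));
}
#endif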

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1)\n"
		"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	/* Full barrier before the plain store gives release semantics. */
	smp_mb();
	lock->lock = 0;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1b; R6 = #1; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
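
/*
 * For illustration only (not compiled): the spin-lock loop above reduces
 * to spinning on the 0 -> 1 transition, as in this C11 sketch
 * (spin_lock_sketch is a hypothetical name).
 */
#if 0
static void spin_lock_sketch(atomic_int *lock)
{
	int expected;

	do {
		expected = 0;	/* retry until we win the 0 -> 1 race */
	} while (!atomic_compare_exchange_weak_explicit(lock, &expected, 1,
			memory_order_acquire, memory_order_relaxed));
}
#endif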

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}

static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
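
/*
 * Usage sketch (illustrative; my_lock is a hypothetical caller-owned
 * arch_spinlock_t): the trylocks return nonzero on success, so callers
 * typically do
 *
 *	if (arch_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&my_lock);
 *	}
 */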

/*
 * SMP spinlocks are intended to allow only a single CPU at the lock.
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	/* Spin until the lock is observed free, with acquire ordering. */
	smp_cond_load_acquire(&lock->lock, !VAL);
}

#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_SPINLOCK_H */