spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
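
/*
 * PA-RISC's only atomic read-modify-write instruction is LDCW (load and
 * clear word), wrapped here by __ldcw().  The lock word therefore holds
 * 1 when the lock is free and 0 when it is held: __ldcw() returns the
 * previous value and leaves the word zeroed, and unlocking is a plain
 * store of 1.  LDCW also has strict alignment requirements (16 bytes on
 * older processors), so every access goes through __ldcw_align() to
 * locate a suitably aligned lock word inside arch_spinlock_t.
 */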

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	/* Spin until __ldcw() grabs the word; briefly re-enable interrupts
	 * while waiting if the caller had them enabled (PSW_SM_I). */
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers
 * from grabbing the rwlock.
 */
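
/*
 * Counter convention used by the functions below:
 *   rw->counter == 0    the rwlock is free
 *   rw->counter  > 0    that many readers hold the lock
 *   rw->counter == -1   a writer holds the lock
 */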

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
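/*
 * If readers still hold the lock, the writer below releases the spinlock
 * and restores interrupts while it waits for rw->counter to drain, so
 * that readers (including ones running in interrupt context) can make
 * progress; only then does it retry the acquisition.
 */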
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
	return !rw->counter;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* __ASM_SPINLOCK_H */