spinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
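
/*
 * Background note: PA-RISC has no compare-and-swap; its only atomic
 * read-modify-write primitive is LDCW (load and clear word), which
 * atomically reads a word and sets it to zero.  A lock word is therefore
 * 1 when free and 0 when held, and __ldcw_align() is used because the
 * LDCW operand traditionally must be 16-byte aligned.
 */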

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	mb();
	*a = 1;
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;

	return ret;
}
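
/*
 * Illustrative sketch (not part of the original header): how a caller would
 * typically exercise the primitives above.  The initializer assumes the
 * __ARCH_SPIN_LOCK_UNLOCKED definition from asm/spinlock_types.h.
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);		spins until LDCW returns non-zero
 *	...critical section...
 *	arch_spin_unlock(&lock);	mb(), then store 1 to release
 *
 *	if (arch_spin_trylock(&lock)) {	single LDCW attempt, no spinning
 *		...
 *		arch_spin_unlock(&lock);
 *	}
 */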

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
 * time by readers.  With care, they can also be taken in interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
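
/*
 * Counter convention used below: a value >= 0 is the number of readers
 * currently holding the lock, while -1 marks the rwlock as write-locked.
 */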

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}
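
/*
 * Illustrative sketch (not part of the original header): typical interplay
 * of the rwlock primitives above.  The initializer assumes the
 * __ARCH_RW_LOCK_UNLOCKED definition from asm/spinlock_types.h.
 *
 *	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&rw);	counter becomes 1; other readers may still enter
 *	arch_read_unlock(&rw);	counter drops back to 0
 *
 *	arch_write_lock(&rw);	waits for counter == 0, then sets it to -1
 *	arch_write_unlock(&rw);	resets counter to 0 and releases the spinlock
 */
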
  142. #endif /* __ASM_SPINLOCK_H */