lockref.c

#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
	struct lockref old; \
	BUILD_BUG_ON(sizeof(old) != 8); \
	old.lock_count = READ_ONCE(lockref->lock_count); \
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
		struct lockref new = old, prev = old; \
		CODE \
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
						   old.lock_count, \
						   new.lock_count); \
		if (likely(old.lock_count == prev.lock_count)) { \
			SUCCESS; \
		} \
		cpu_relax_lowlatency(); \
	} \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
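
/*
 * All of the operations below follow the same pattern: CMPXCHG_LOOP() keeps
 * retrying a lockless 64-bit cmpxchg of the combined { spinlock, count }
 * word for as long as the spinlock part is observed unlocked, and returns
 * early through SUCCESS when the update wins.  If the lock is held (or the
 * loop is compiled out because cmpxchg lockrefs are unavailable), execution
 * falls through to the spin_lock() slow path after the macro invocation.
 *
 * As a rough sketch only, CMPXCHG_LOOP(new.count++;, return;) behaves like:
 *
 *	old = READ_ONCE(*lockref);
 *	while (spinlock embedded in old is unlocked) {
 *		new = old; new.count++;
 *		if (cmpxchg64_relaxed() swaps old for new)
 *			return;
 *		old = the value the cmpxchg reloaded;
 *		cpu_relax_lowlatency();
 *	}
 */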

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
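
/*
 * Illustrative use, not part of this file: lockref_get() is the cheap
 * "take another reference" path when the caller already owns one, e.g.
 * lockref_get(&obj->ref) on a hypothetical 'obj' whose count is already
 * known to be non-zero.
 */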

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
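
/*
 * Illustrative use, not part of this file: lockref_get_not_zero() suits
 * lookup paths that find an object without holding its lock and must not
 * resurrect one whose count has already dropped to zero (or been marked
 * dead), e.g.
 *
 *	if (!lockref_get_not_zero(&obj->ref))
 *		return NULL;	(hypothetical 'obj' is on its way out)
 */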

/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
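
/*
 * Note for callers, based on the code above: a 0 return from
 * lockref_get_or_lock() leaves lockref->lock held, so the caller must
 * handle the zero/dead count and then drop the lock itself, e.g.
 * (illustrative only):
 *
 *	if (!lockref_get_or_lock(&obj->ref)) {
 *		... deal with the dying object under obj->ref.lock ...
 *		spin_unlock(&obj->ref.lock);
 *	}
 */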

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
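
/*
 * Descriptive note: when USE_CMPXCHG_LOCKREF is not set the loop above is
 * compiled out, so lockref_put_return() always returns -1.  Callers must
 * therefore treat -1 as "could not decrement locklessly" and retry under
 * the spinlock, e.g. via lockref_put_or_lock() below (illustrative only):
 *
 *	if (lockref_put_return(&obj->ref) < 0)
 *		take the locked slow path instead;
 */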

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
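
/*
 * Note for callers, based on the code above: a 0 return means the count was
 * <= 1 and lockref->lock is now held, so the caller can tear the object
 * down (or mark it dead) and must release the lock itself.
 */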

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
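
/*
 * Descriptive note: the assert_spin_locked() above means the caller must
 * already hold lockref->lock, and the large negative count is what the
 * *_not_zero() and *_not_dead() helpers check against.  Illustrative only:
 *
 *	spin_lock(&obj->ref.lock);
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 */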

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);