/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
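
/*
 * Hash the address of the atomic64_t variable to pick one of the
 * NR_LOCKS spinlocks, so that unrelated variables usually end up
 * on different locks.
 */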
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
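
/*
 * Even a plain read has to take the lock: on the processors this
 * code targets, a 64-bit load is not a single atomic instruction,
 * so an unlocked read could observe a half-updated value.
 */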
long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
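
/*
 * The macros below generate the add/sub/and/or/xor families:
 * ATOMIC64_OP emits the plain operation, ATOMIC64_OP_RETURN the
 * variant returning the new value, and ATOMIC64_FETCH_OP the
 * variant returning the old value.  Each takes the hashed spinlock
 * around the read-modify-write.
 */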
#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
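
/*
 * Decrement only if the result stays non-negative.  The decremented
 * value is returned either way, so a negative return means nothing
 * was stored.
 */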
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
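
/*
 * Compare-and-exchange under the hashed lock: store @n only if the
 * counter still equals @o, and return the value that was seen.
 */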
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
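
/*
 * Unconditionally replace the counter with @new and return the
 * previous value.
 */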
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
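
/*
 * Add @a to the counter unless it currently equals @u; return the
 * value observed before any addition.
 */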
long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_fetch_add_unless);