atomic64.c

/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>
/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
        spinlock_t lock;
        char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
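
/*
 * Map an atomic64_t to one of the hashed locks.  The low L1_CACHE_SHIFT
 * bits are dropped, so variables sharing a cacheline share a lock, and
 * the remaining address bits are folded together with XOR before being
 * masked down to a lock index.
 */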
static inline spinlock_t *lock_addr(const atomic64_t *v)
{
        unsigned long addr = (unsigned long) v;

        addr >>= L1_CACHE_SHIFT;
        addr ^= (addr >> 8) ^ (addr >> 16);
        return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
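
/*
 * Even a plain read must take the lock: on a 32-bit machine a 64-bit
 * load is not a single atomic operation and could otherwise observe a
 * torn value while another CPU is in the middle of an update.
 */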
long long atomic64_read(const atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);

        spin_lock_irqsave(lock, flags);
        v->counter = i;
        spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

void atomic64_add(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);

        spin_lock_irqsave(lock, flags);
        v->counter += a;
        spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter += a;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_add_return);

void atomic64_sub(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);

        spin_lock_irqsave(lock, flags);
        v->counter -= a;
        spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter -= a;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
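
/*
 * Decrement v only if the result would stay non-negative.  The
 * (possibly negative) decremented value is returned either way, so a
 * return value < 0 tells the caller that no change was made.
 */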
long long atomic64_dec_if_positive(atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter - 1;
        if (val >= 0)
                v->counter = val;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
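
/*
 * Set v to n only if it currently equals o.  The value observed before
 * any change is returned, so the swap succeeded exactly when the
 * return value equals o.
 */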
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter;
        if (val == o)
                v->counter = n;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter;
        v->counter = new;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_xchg);
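
/*
 * Add a to v unless the counter currently equals u.  Returns 1 if the
 * addition was performed, 0 otherwise.
 */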
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        int ret = 0;

        spin_lock_irqsave(lock, flags);
        if (v->counter != u) {
                v->counter += a;
                ret = 1;
        }
        spin_unlock_irqrestore(lock, flags);
        return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
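
/*
 * The locks are set up from a pure_initcall, which runs at the
 * earliest initcall level, so they are ready before later initcalls
 * that might touch an atomic64_t.
 */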
static int init_atomic64_lock(void)
{
        int i;

        for (i = 0; i < NR_LOCKS; ++i)
                spin_lock_init(&atomic64_lock[i].lock);
        return 0;
}

pure_initcall(init_atomic64_lock);
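
/*
 * A minimal usage sketch (not part of this file): callers use the same
 * atomic64 API declared via <asm/atomic.h> as they would on an
 * architecture with native 64-bit atomics, e.g.
 *
 *	static atomic64_t bytes_done = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_done);		// locked 64-bit add
 *	total = atomic64_read(&bytes_done);	// locked 64-bit read
 *
 * "bytes_done", "len" and "total" are hypothetical names used only for
 * illustration.
 */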