atomic32.c

/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>
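
/*
 * Each atomic_t (and the bitop words further down) is protected by one
 * of a small array of spinlocks rather than by a hardware atomic
 * instruction.  ATOMIC_HASH() picks a lock by hashing the variable's
 * address, so unrelated atomics usually contend on different locks
 * while the total footprint stays at ATOMIC_HASH_SIZE locks.
 */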
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long) a) >> 8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */
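
/*
 * ATOMIC_FETCH_OP() expands to an atomic_fetch_<op>() that returns the
 * counter value *before* the operation; ATOMIC_OP_RETURN() below
 * returns the value *after* it.  Both hold the hashed lock with
 * interrupts disabled so the read-modify-write of v->counter cannot
 * be interleaved with another update.
 */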
#define ATOMIC_FETCH_OP(op, c_op) \
int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int ret; \
	unsigned long flags; \
	spin_lock_irqsave(ATOMIC_HASH(v), flags); \
\
	ret = v->counter; \
	v->counter c_op i; \
\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
	return ret; \
} \
EXPORT_SYMBOL(atomic_fetch_##op);

#define ATOMIC_OP_RETURN(op, c_op) \
int atomic_##op##_return(int i, atomic_t *v) \
{ \
	int ret; \
	unsigned long flags; \
	spin_lock_irqsave(ATOMIC_HASH(v), flags); \
\
	ret = (v->counter c_op i); \
\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
	return ret; \
} \
EXPORT_SYMBOL(atomic_##op##_return);
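
/*
 * Only the add flavour of ATOMIC_OP_RETURN is instantiated here;
 * presumably the remaining arithmetic helpers (sub, inc, dec) are
 * derived from atomic_add_return() in the header, e.g. by negating
 * the operand.
 */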
ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

int atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_xchg);
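
/*
 * atomic_cmpxchg() returns the counter value observed before any
 * store; the exchange happened iff that value equals 'old'.
 */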
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);
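
/*
 * __atomic_add_unless(): add 'a' to v unless it currently equals 'u'.
 * The old value is returned either way, so callers can tell whether
 * the add was performed.
 */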
int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);
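
/*
 * The ___set_bit/___clear_bit/___change_bit helpers below take a
 * pre-computed word address and mask and return the masked old value,
 * which is the shape the test_and_* style bitops need.  They reuse
 * the same hashed locks as the atomic_t operations above.
 */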
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);
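
/*
 * __cmpxchg_u32()/__xchg_u32() implement compare-and-swap and exchange
 * on plain 32-bit words, under the same address-hashed locks so they
 * stay consistent with the atomic_t helpers above.
 */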
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);