atomic.h

#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)          ( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)        ( (atomic64_t) { (i) } )

#define atomic_read(v)          (*(volatile int *)&(v)->counter)
#define atomic64_read(v)        (*(volatile long *)&(v)->counter)

#define atomic_set(v,i)         ((v)->counter = (i))
#define atomic64_set(v,i)       ((v)->counter = (i))
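
/*
 * Illustrative usage sketch (hypothetical identifiers, not kernel API):
 * a bare atomic counter for resource accounting.
 *
 *      static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *      void widget_alloc(void) { atomic_inc(&nr_widgets); }
 *      void widget_free(void)  { atomic_dec(&nr_widgets); }
 *      int  widget_count(void) { return atomic_read(&nr_widgets); }
 *
 * atomic_read()/atomic_set() are plain volatile accesses; only the
 * modifying operations below use locked load/store sequences.
 */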

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}
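
/*
 * C-level sketch of the load-locked/store-conditional loop above
 * (illustrative only; store_conditional() is a hypothetical stand-in
 * for stl_c, and the atomicity cannot be expressed in portable C):
 *
 *      do {
 *              temp = v->counter;      // ldl_l: load, set reservation
 *              temp += i;              // addl
 *      } while (!store_conditional(&v->counter, temp));  // stl_c
 *
 * stl_c writes its success flag back into %0, so "beq %0,2f" branches
 * out to retry whenever another CPU broke the reservation between the
 * load and the store.
 */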

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       addq %0,%2,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       subq %0,%2,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value.
 */

static inline int atomic_add_return(int i, atomic_t *v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%3,%2\n"
        "       addl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}
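
/*
 * Illustrative use (hypothetical identifiers): the returned value,
 * together with the smp_mb() on entry and exit, makes the *_return
 * variants suitable for sequencing, e.g. handing out unique ids:
 *
 *      static atomic_t next_id = ATOMIC_INIT(0);
 *
 *      int get_id(void) { return atomic_inc_return(&next_id); }
 *
 * Racing callers on SMP each observe a distinct value.
 */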

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       addq %0,%3,%2\n"
        "       addq %0,%3,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%3,%2\n"
        "       subl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       subq %0,%3,%2\n"
        "       subq %0,%3,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
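
/*
 * atomic_cmpxchg() is the building block for arbitrary read-modify-write
 * updates.  A sketch (atomic_max() is a hypothetical helper, not kernel
 * API): atomically raise a watermark to @new if it is higher:
 *
 *      static inline void atomic_max(atomic_t *v, int new)
 *      {
 *              int old = atomic_read(v);
 *              while (old < new) {
 *                      int seen = atomic_cmpxchg(v, old, new);
 *                      if (seen == old)
 *                              break;          // we installed @new
 *                      old = seen;             // raced; retry with fresh value
 *              }
 *      }
 *
 * atomic_add_unless() below follows the same pattern.
 */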

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
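
/*
 * Typical use of atomic_inc_not_zero() (illustrative sketch, hypothetical
 * types): take a reference to an object found in a lookup structure only
 * if its count has not already dropped to zero, i.e. it is not in the
 * middle of being freed:
 *
 *      struct obj *obj_get(struct obj *p)
 *      {
 *              if (p && !atomic_inc_not_zero(&p->refcnt))
 *                      return NULL;    // found it dying; don't resurrect
 *              return p;
 *      }
 */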

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v)   (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v)   atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v)   atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v)   (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v)   (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v)   (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v)   atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v)   atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec()  smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc()  smp_mb()
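
/*
 * The barrier macros order plain memory accesses around the non-returning
 * atomics, since atomic_inc()/atomic_dec() above issue no barriers of
 * their own.  Illustrative sketch (hypothetical identifiers):
 *
 *      obj->state = DEAD;              // must be visible before the dec
 *      smp_mb__before_atomic_dec();
 *      atomic_dec(&obj->refcnt);
 *
 * The *_return and *_and_test variants already imply smp_mb() on both
 * sides, so they need no extra barriers.
 */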

#include <asm-generic/atomic-long.h>

#endif /* _ALPHA_ATOMIC_H */