atomic.h

#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)          { (i) }
#define ATOMIC64_INIT(i)        { (i) }

#define atomic_read(v)          READ_ONCE((v)->counter)
#define atomic64_read(v)        READ_ONCE((v)->counter)

#define atomic_set(v,i)         WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)       WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
ia64_atomic_##op (int i, atomic_t *v) \
{ \
        __s32 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
        return new; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int \
ia64_atomic_fetch_##op (int i, atomic_t *v) \
{ \
        __s32 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
        return old; \
}

#define ATOMIC_OPS(op, c_op) \
        ATOMIC_OP(op, c_op) \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
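
/*
 * The ATOMIC_OP/ATOMIC_FETCH_OP generators above implement each
 * arithmetic op as a compare-and-swap retry loop: read the counter,
 * compute the new value, and retry if another CPU changed the counter
 * in between.  As a rough sketch (not the literal preprocessor output,
 * and omitting the BUGCHECK plumbing), ATOMIC_OPS(add, +) yields:
 *
 *	static __inline__ int ia64_atomic_add(int i, atomic_t *v)
 *	{
 *		__s32 old, new;
 *		do {
 *			old = atomic_read(v);
 *			new = old + i;
 *		} while (ia64_cmpxchg(acq, v, old, new,
 *				      sizeof(atomic_t)) != old);
 *		return new;		// value after the add
 *	}
 *
 * ia64_atomic_fetch_add() is identical except that it returns "old",
 * the value before the add.
 */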
#define atomic_add_return(i,v) \
({ \
        int __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
                : ia64_atomic_add(__ia64_aar_i, v); \
})

#define atomic_sub_return(i,v) \
({ \
        int __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
                : ia64_atomic_sub(__ia64_asr_i, v); \
})

#define atomic_fetch_add(i,v) \
({ \
        int __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
                : ia64_atomic_fetch_add(__ia64_aar_i, v); \
})

#define atomic_fetch_sub(i,v) \
({ \
        int __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
                : ia64_atomic_fetch_sub(__ia64_asr_i, v); \
})
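
/*
 * Why the odd constant checks above: the ia64 fetchadd instruction
 * only encodes the immediates -16, -8, -4, -1, 1, 4, 8 and 16, so the
 * single-instruction fast path is usable only when the increment is a
 * compile-time constant from that set; anything else falls back to
 * the cmpxchg retry loop.  A usage sketch:
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *
 *	atomic_add_return(4, &cnt);	// constant 4: one fetchadd
 *	atomic_add_return(5, &cnt);	// 5 not encodable: cmpxchg loop
 */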
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)         (void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)          (void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)         (void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)   ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)    ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)   ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
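
/*
 * A usage sketch for the bitwise ops (MY_FLAG_BUSY is a hypothetical
 * flag bit, not defined in this file):
 *
 *	#define MY_FLAG_BUSY	0x1
 *
 *	atomic_t flags = ATOMIC_INIT(0);
 *
 *	atomic_or(MY_FLAG_BUSY, &flags);		// set the bit
 *	old = atomic_fetch_and(~MY_FLAG_BUSY, &flags);	// clear it and
 *							// get prior value
 */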
#define ATOMIC64_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_##op (__s64 i, atomic64_t *v) \
{ \
        __s64 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic64_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
        return new; \
}

#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \
{ \
        __s64 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic64_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
        return old; \
}

#define ATOMIC64_OPS(op, c_op) \
        ATOMIC64_OP(op, c_op) \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
#define atomic64_add_return(i,v) \
({ \
        long __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
                : ia64_atomic64_add(__ia64_aar_i, v); \
})

#define atomic64_sub_return(i,v) \
({ \
        long __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
                : ia64_atomic64_sub(__ia64_asr_i, v); \
})

#define atomic64_fetch_add(i,v) \
({ \
        long __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
                : ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})

#define atomic64_fetch_sub(i,v) \
({ \
        long __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
                : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
})
ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)       (void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)        (void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)       (void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)  ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
        (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
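
/*
 * atomic_cmpxchg() is the building block for lock-free update loops.
 * A minimal sketch (a clamped increment with a hypothetical limit,
 * written in the same style as the helpers below):
 *
 *	static inline void atomic_inc_below(atomic_t *v, int limit)
 *	{
 *		int c = atomic_read(v), old;
 *
 *		for (;;) {
 *			if (c >= limit)
 *				return;		// already at the cap
 *			old = atomic_cmpxchg(v, c, c + 1);
 *			if (old == c)
 *				return;		// our update won
 *			c = old;		// lost the race; retry
 *		}
 *	}
 */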
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}
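
/*
 * __atomic_add_unless() adds @a to @v unless @v == @u, and returns
 * the old value either way (the generic atomic_add_unless() wrapper
 * in linux/atomic.h, not this file, turns that into a boolean).
 * Sketch:
 *
 *	if (__atomic_add_unless(&refs, 1, 0) != 0)
 *		// took a reference; 0 means the count was already dead
 */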
static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
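
/*
 * Note the asymmetry: __atomic_add_unless() returns the old value,
 * while atomic64_add_unless() returns whether the add happened
 * (c != u).  atomic64_inc_not_zero() therefore reads naturally:
 *
 *	if (atomic64_inc_not_zero(&obj->refcnt))
 *		// gained a reference; the count was not zero
 */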
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
        long c, old, dec;
        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg((v), c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}
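
/*
 * atomic64_dec_if_positive() decrements only if the result would stay
 * non-negative, and returns the decremented value (negative when no
 * decrement was done).  A semaphore-style sketch (hypothetical names):
 *
 *	if (atomic64_dec_if_positive(&sem_count) >= 0)
 *		// got a slot
 *	else
 *		// would have gone negative; nothing was taken
 */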
/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
        return atomic64_add_return(i, v) < 0;
}
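
/*
 * A usage sketch: atomic_add_negative() drops a count and tests the
 * sign of the result in one atomic step, e.g.
 *
 *	if (atomic_add_negative(-1, &v))
 *		// v went below zero
 */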
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)        (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)        (atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)                 (void)atomic_add_return((i), (v))
#define atomic_sub(i,v)                 (void)atomic_sub_return((i), (v))
#define atomic_inc(v)                   atomic_add(1, (v))
#define atomic_dec(v)                   atomic_sub(1, (v))

#define atomic64_add(i,v)               (void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)               (void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)                 atomic64_add(1, (v))
#define atomic64_dec(v)                 atomic64_sub(1, (v))
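
/*
 * A refcount-style sketch tying the derived helpers together
 * (hypothetical structure, not part of this header):
 *
 *	struct obj { atomic_t refcnt; };
 *
 *	static void obj_get(struct obj *o)
 *	{
 *		atomic_inc(&o->refcnt);
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			kfree(o);	// last reference dropped
 *	}
 */
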
#endif /* _ASM_IA64_ATOMIC_H */