/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us. Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i) { (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, TOPLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v) READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
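
/*
 * Illustrative usage sketch ("active_users" is a hypothetical counter,
 * not something defined in this file):
 *
 *	static atomic_t active_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&active_users, 1);
 *	pr_debug("users: %d\n", atomic_read(&active_users));
 */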

#if XCHAL_HAVE_S32C1I
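/*
 * With the S32C1I option the atomic primitives below are built on a
 * compare-and-swap retry loop: l32i loads the current value, wsr copies
 * it into the SCOMPARE1 special register as the expected value, the
 * requested operation computes the new value, and s32c1i stores that
 * new value only if the word in memory still equals SCOMPARE1, always
 * loading the value actually found in memory back into the register.
 * If the word changed in the meantime (another CPU or an interrupt),
 * bne branches back to label 1 and the sequence is retried.
 */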
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \
{ \
        unsigned long tmp; \
        int result; \
\
        __asm__ __volatile__( \
                        "1: l32i %1, %3, 0\n" \
                        "   wsr %1, scompare1\n" \
                        "   " #op " %0, %1, %2\n" \
                        "   s32c1i %0, %3, 0\n" \
                        "   bne %0, %1, 1b\n" \
                        : "=&a" (result), "=&a" (tmp) \
                        : "a" (i), "a" (v) \
                        : "memory" \
                        ); \
}

#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \
{ \
        unsigned long tmp; \
        int result; \
\
        __asm__ __volatile__( \
                        "1: l32i %1, %3, 0\n" \
                        "   wsr %1, scompare1\n" \
                        "   " #op " %0, %1, %2\n" \
                        "   s32c1i %0, %3, 0\n" \
                        "   bne %0, %1, 1b\n" \
                        "   " #op " %0, %0, %2\n" \
                        : "=&a" (result), "=&a" (tmp) \
                        : "a" (i), "a" (v) \
                        : "memory" \
                        ); \
\
        return result; \
}

#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \
{ \
        unsigned long tmp; \
        int result; \
\
        __asm__ __volatile__( \
                        "1: l32i %1, %3, 0\n" \
                        "   wsr %1, scompare1\n" \
                        "   " #op " %0, %1, %2\n" \
                        "   s32c1i %0, %3, 0\n" \
                        "   bne %0, %1, 1b\n" \
                        : "=&a" (result), "=&a" (tmp) \
                        : "a" (i), "a" (v) \
                        : "memory" \
                        ); \
\
        return result; \
}

#else /* XCHAL_HAVE_S32C1I */
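/*
 * Without the S32C1I option there is no hardware compare-and-swap, so
 * the fallback versions below make the load/modify/store sequence atomic
 * by raising the interrupt level to TOPLEVEL around it (the rsil/wsr
 * pattern described at the top of this file). This only excludes
 * interrupts on the local CPU, which is sufficient as long as no other
 * processor can access the counter concurrently.
 */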
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \
{ \
        unsigned int vval; \
\
        __asm__ __volatile__( \
                        " rsil a15, "__stringify(TOPLEVEL)"\n" \
                        " l32i %0, %2, 0\n" \
                        " " #op " %0, %0, %1\n" \
                        " s32i %0, %2, 0\n" \
                        " wsr a15, ps\n" \
                        " rsync\n" \
                        : "=&a" (vval) \
                        : "a" (i), "a" (v) \
                        : "a15", "memory" \
                        ); \
}

#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \
{ \
        unsigned int vval; \
\
        __asm__ __volatile__( \
                        " rsil a15, "__stringify(TOPLEVEL)"\n" \
                        " l32i %0, %2, 0\n" \
                        " " #op " %0, %0, %1\n" \
                        " s32i %0, %2, 0\n" \
                        " wsr a15, ps\n" \
                        " rsync\n" \
                        : "=&a" (vval) \
                        : "a" (i), "a" (v) \
                        : "a15", "memory" \
                        ); \
\
        return vval; \
}

#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \
{ \
        unsigned int tmp, vval; \
\
        __asm__ __volatile__( \
                        " rsil a15, "__stringify(TOPLEVEL)"\n" \
                        " l32i %0, %3, 0\n" \
                        " " #op " %1, %0, %2\n" \
                        " s32i %1, %3, 0\n" \
                        " wsr a15, ps\n" \
                        " rsync\n" \
                        : "=&a" (vval), "=&a" (tmp) \
                        : "a" (i), "a" (v) \
                        : "a15", "memory" \
                        ); \
\
        return vval; \
}

#endif /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
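
/*
 * The ATOMIC_OPS() instantiations above generate:
 *
 *	atomic_add(),  atomic_add_return(),  atomic_fetch_add()
 *	atomic_sub(),  atomic_sub_return(),  atomic_fetch_sub()
 *	atomic_and(),  atomic_fetch_and()
 *	atomic_or(),   atomic_fetch_or()
 *	atomic_xor(),  atomic_fetch_xor()
 */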

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
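
/*
 * atomic_cmpxchg() and atomic_xchg() simply apply the generic cmpxchg()
 * and xchg() helpers from <asm/cmpxchg.h> to the counter word:
 * atomic_cmpxchg() stores @n only if the counter currently holds @o and
 * returns the previous value, while atomic_xchg() unconditionally swaps
 * in the new value and returns the old one.
 */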

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}
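
/*
 * Illustrative use of __atomic_add_unless() ("my_tryget" is a
 * hypothetical helper, not defined in this file): take a reference only
 * if the count has not already dropped to zero.
 *
 *	static inline int my_tryget(atomic_t *count)
 *	{
 *		return __atomic_add_unless(count, 1, 0) != 0;
 *	}
 */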

#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */