/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
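
/*
 * Note: at the time of writing, RISCV_ACQUIRE_BARRIER and
 * RISCV_RELEASE_BARRIER (from <asm/fence.h>) expand to "fence r, rw"
 * and "fence rw, w" respectively, i.e. one-directional fences that are
 * weaker (and potentially cheaper) than a full "fence rw, rw".
 */
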
static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i)	{ (i) }

static __always_inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP(op, asm_op, I, w, int,   )				\
	ATOMIC_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
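
/*
 * For illustration, ATOMIC_OP(add, add, i, w, int, ) above expands
 * (roughly) to:
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * Writing the AMO result to the zero register discards the old value,
 * which is fine since these ops return nothing.
 */
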
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,			\
					      atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}
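
/*
 * For example, instantiating ATOMIC_OP_RETURN(add, add, +, i, w, int, )
 * yields (roughly):
 *
 *	static __always_inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		return atomic_fetch_add(i, v) + i;
 *	}
 *
 * i.e. the new value is recovered by reapplying the operation to the
 * old value that the AMO fetched.
 */
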
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
	ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
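
/*
 * There is no "amosub" instruction, so subtraction is addition of the
 * negated operand: ATOMIC_OPS(sub, add, +, -i) makes atomic_fetch_sub()
 * an amoadd of -i, and atomic_sub_return(i, v) becomes
 * atomic_fetch_sub(i, v) + -i.
 */
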
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
	ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
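
/*
 * Hypothetical example: take a reference only if the object is still
 * live, which is the pattern behind the generic atomic_inc_not_zero():
 *
 *	if (atomic_fetch_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;	  (refcount already hit zero; object dead)
 */
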
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)						\
	ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()

#undef ATOMIC_OPS
#undef ATOMIC_OP
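
/*
 * Typical caller-side retry loop for the cmpxchg routines generated
 * above (hypothetical example, not part of this header):
 *
 *	int old = atomic_read(v);
 *	for (;;) {
 *		int seen = atomic_cmpxchg(v, old, old + 2);
 *		if (seen == old)
 *			break;		  (CAS succeeded; update visible)
 *		old = seen;		  (lost the race; retry with fresh value)
 *	}
 */
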
static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
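
/*
 * atomic_dec_if_positive() is the usual building block for a
 * counting-semaphore-style fast path (hypothetical example):
 *
 *	if (atomic_dec_if_positive(&slots) < 0)
 *		wait_for_slot();	  (no slots left; take the slow path)
 *
 * The return value is what the counter would be after the decrement;
 * a negative result means the decrement was refused and the counter
 * was left unchanged.
 */
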
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, long offset)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */