
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

/*
 * Using a branch-likely instruction to check the result of an sc instruction
 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 * cause ll-sc sequences to execute non-atomically.
 */
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif

#define ATOMIC_INIT(i)    { (i) }
/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
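/*
 * Illustrative usage sketch (not part of the original header): declaring an
 * atomic_t and using the plain accessors above.  The names example_count,
 * example_reset and example_peek are hypothetical.
 */
#if 0	/* example only, never built */
static atomic_t example_count = ATOMIC_INIT(0);

static inline void example_reset(void)
{
	/* plain store through WRITE_ONCE(); no memory barrier implied */
	atomic_set(&example_count, 0);
}

static inline int example_peek(void)
{
	/* plain load through READ_ONCE(); no memory barrier implied */
	return atomic_read(&example_count);
}
#endif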
#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
	if (kernel_uses_llsc) { \
		int temp; \
\
		loongson_llsc_mb(); \
		__asm__ __volatile__( \
		"	.set	"MIPS_ISA_LEVEL"		\n" \
		"1:	ll	%0, %1		# atomic_" #op "\n" \
		"	" #asm_op " %0, %2			\n" \
		"	sc	%0, %1				\n" \
		"\t" __scbeqz "	%0, 1b				\n" \
		"	.set	mips0				\n" \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
}
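/*
 * Illustrative sketch (not part of the original header): on a configuration
 * where kernel_uses_llsc is true, ATOMIC_OP(add, +=, addu) expands to roughly
 * the function below, with __scbeqz shown already resolved to beqz.
 */
#if 0	/* expansion sketch, never built */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	int temp;

	loongson_llsc_mb();
	__asm__ __volatile__(
	"	.set	"MIPS_ISA_LEVEL"	\n"
	"1:	ll	%0, %1	# atomic_add	\n"	/* load-linked the counter */
	"	addu	%0, %2			\n"	/* add the increment */
	"	sc	%0, %1			\n"	/* try to store it back */
	"	beqz	%0, 1b			\n"	/* retry if the sc failed */
	"	.set	mips0			\n"
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)
	: "Ir" (i));
}
#endif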
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
	int result; \
\
	if (kernel_uses_llsc) { \
		int temp; \
\
		loongson_llsc_mb(); \
		__asm__ __volatile__( \
		"	.set	"MIPS_ISA_LEVEL"		\n" \
		"1:	ll	%1, %2	# atomic_" #op "_return	\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	sc	%0, %2				\n" \
		"\t" __scbeqz "	%0, 1b				\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	.set	mips0				\n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
\
	return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
{ \
	int result; \
\
	if (kernel_uses_llsc) { \
		int temp; \
\
		loongson_llsc_mb(); \
		__asm__ __volatile__( \
		"	.set	"MIPS_ISA_LEVEL"		\n" \
		"1:	ll	%1, %2	# atomic_fetch_" #op "	\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	sc	%0, %2				\n" \
		"\t" __scbeqz "	%0, 1b				\n" \
		"	.set	mips0				\n" \
		"	move	%0, %1				\n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		result = v->counter; \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
\
	return result; \
}
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
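/*
 * Illustrative usage sketch (not part of the original header): the macro
 * expansions above provide functions such as atomic_add(),
 * atomic_add_return_relaxed() and atomic_fetch_or_relaxed().  The names
 * example_users, example_flags, example_join and example_set_flag are
 * hypothetical.
 */
#if 0	/* example only, never built */
static atomic_t example_users = ATOMIC_INIT(0);
static atomic_t example_flags = ATOMIC_INIT(0);

static inline int example_join(void)
{
	/* add one and return the new value, with relaxed ordering */
	return atomic_add_return_relaxed(1, &example_users);
}

static inline int example_set_flag(int bit)
{
	/* OR the bit in and return the value the word held beforehand */
	return atomic_fetch_or_relaxed(1 << bit, &example_flags);
}
#endif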
/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
		"	.set	mips0				\n"
		"	subu	%0, %1, %3			\n"
		"	move	%1, %0				\n"
		"	bltz	%0, 1f				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	sc	%1, %2				\n"
		"\t" __scbeqz "	%1, 1b				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
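/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop built on atomic_cmpxchg(), adding @a to @v
 * unless @v already holds @u.  example_add_unless is a hypothetical name;
 * the kernel's generic helper for this pattern is defined elsewhere.
 */
#if 0	/* example only, never built */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		int old;

		if (c == u)
			return 0;	/* forbidden value seen, do nothing */
		old = atomic_cmpxchg(v, c, c + a);
		if (old == c)
			return 1;	/* our compare-and-swap won */
		c = old;		/* lost the race, retry with the new value */
	}
}
#endif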
/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
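/*
 * Illustrative usage sketch (not part of the original header): using
 * atomic_dec_if_positive() as a credit counter that never goes below zero.
 * example_credits and example_take_credit are hypothetical names.
 */
#if 0	/* example only, never built */
static atomic_t example_credits = ATOMIC_INIT(16);

static inline bool example_take_credit(void)
{
	/* returns old value minus one; a negative result means no credit was taken */
	return atomic_dec_if_positive(&example_credits) >= 0;
}
#endif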
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
	if (kernel_uses_llsc) { \
		long temp; \
\
		loongson_llsc_mb(); \
		__asm__ __volatile__( \
		"	.set	"MIPS_ISA_LEVEL"		\n" \
		"1:	lld	%0, %1		# atomic64_" #op "\n" \
		"	" #asm_op " %0, %2			\n" \
		"	scd	%0, %1				\n" \
		"\t" __scbeqz "	%0, 1b				\n" \
		"	.set	mips0				\n" \
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
	long result; \
\
	if (kernel_uses_llsc) { \
		long temp; \
\
		loongson_llsc_mb(); \
		__asm__ __volatile__( \
		"	.set	"MIPS_ISA_LEVEL"		\n" \
		"1:	lld	%1, %2	# atomic64_" #op "_return\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	scd	%0, %2				\n" \
		"\t" __scbeqz "	%0, 1b				\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	.set	mips0				\n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
\
	return result; \
}

#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
	long result; \
\
	if (kernel_uses_llsc) { \
		long temp; \
\
		loongson_llsc_mb(); \
		__asm__ __volatile__( \
		"	.set	"MIPS_ISA_LEVEL"		\n" \
		"1:	lld	%1, %2	# atomic64_fetch_" #op "\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	scd	%0, %2				\n" \
		"\t" __scbeqz "	%0, 1b				\n" \
		"	move	%0, %1				\n" \
		"	.set	mips0				\n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
		: "Ir" (i)); \
	} else { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		result = v->counter; \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
\
	return result; \
}

#define ATOMIC64_OPS(op, c_op, asm_op) \
	ATOMIC64_OP(op, c_op, asm_op) \
	ATOMIC64_OP_RETURN(op, c_op, asm_op) \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op, asm_op) \
	ATOMIC64_OP(op, c_op, asm_op) \
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(and, &=, and)
ATOMIC64_OPS(or, |=, or)
ATOMIC64_OPS(xor, ^=, xor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic
 * variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3			\n"
		"	move	%1, %0				\n"
		"	bltz	%0, 1f				\n"
		"	scd	%1, %2				\n"
		"\t" __scbeqz "	%1, 1b				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
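/*
 * Illustrative usage sketch (not part of the original header): a 64-bit
 * statistics counter built on atomic64_add() and atomic64_read(), useful
 * where a 32-bit counter could wrap.  example_bytes and example_account are
 * hypothetical names.
 */
#if 0	/* example only, never built */
static atomic64_t example_bytes = ATOMIC64_INIT(0);

static inline void example_account(long len)
{
	/* 64-bit add with no implied ordering; readers use atomic64_read(&example_bytes) */
	atomic64_add(len, &example_bytes);
}
#endif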
#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */