atomic.h

#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i) { (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
	__ret; \
})

#define __atomic_op_release(op, args...) \
({ \
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
	op##_relaxed(args); \
})
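
/*
 * Illustrative note (not part of the original header): the generic
 * linux/atomic.h of this era builds the acquire/release variants from
 * the _relaxed ones through these wrappers, roughly
 *
 *	#define atomic_add_return_acquire(...) \
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *
 * so the architecture only has to provide the _relaxed primitives and
 * the PPC_ACQUIRE_BARRIER/PPC_RELEASE_BARRIER instructions.
 */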

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
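
/*
 * Illustrative note (not part of the original header): atomic_read()
 * and atomic_set() are plain single-copy-atomic loads and stores; the
 * %U and %X operand modifiers merely let the compiler emit the
 * update/indexed forms of lwz/stw when the addressing mode calls for
 * them.
 */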

#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int a, atomic_t *v) \
{ \
	int t; \
\
	__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
	#asm_op " %0,%2,%0\n" \
	PPC405_ERR77(0,%3) \
" stwcx. %0,0,%3 \n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
	int t; \
\
	__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
	#asm_op " %0,%2,%0\n" \
	PPC405_ERR77(0, %3) \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return t; \
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
	int res, t; \
\
	__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
	#asm_op " %1,%3,%0\n" \
	PPC405_ERR77(0, %4) \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
	: "=&r" (res), "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return res; \
}

#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
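
/*
 * Illustrative note (not part of the original header): each
 * ATOMIC_OPS() invocation above expands to three definitions; for
 * example ATOMIC_OPS(add, add) generates atomic_add(),
 * atomic_add_return_relaxed() and atomic_fetch_add_relaxed(), all
 * built around the same lwarx/stwcx. retry loop.
 */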

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc\n\
	addic %0,%0,1\n"
	PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
	bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
" addic %0,%0,1\n"
	PPC405_ERR77(0, %2)
" stwcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
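
/*
 * Illustrative example (not part of the original header): with a
 * counter initialised to ATOMIC_INIT(-1), the first call to
 * atomic_inc_and_test() brings it to 0 and returns true; every later
 * call returns false.
 */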

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec\n\
	addic %0,%0,-1\n"
	PPC405_ERR77(0,%2)
" stwcx. %0,0,%2\n\
	bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
" addic %0,%0,-1\n"
	PPC405_ERR77(0, %2)
" stwcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # __atomic_add_unless\n\
	cmpw 0,%0,%3 \n\
	beq 2f \n\
	add %0,%2,%0 \n"
	PPC405_ERR77(0,%2)
" stwcx. %0,0,%1 \n\
	bne- 1b \n"
	PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
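
/*
 * Illustrative note (not part of the original header): the generic
 * linux/atomic.h of this era defines
 *
 *	#define atomic_add_unless(v, a, u) \
 *		(__atomic_add_unless((v), (a), (u)) != (u))
 *
 * so callers get a boolean "did the add happen", while this primitive
 * itself returns the old value.
 */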

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
	cmpwi 0,%0,0\n\
	beq- 2f\n\
	addic %1,%0,1\n"
	PPC405_ERR77(0,%2)
" stwcx. %1,0,%2\n\
	bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
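
/*
 * Illustrative note (not part of the original header): defining
 * atomic_inc_not_zero to itself signals to the generic linux/atomic.h
 * that this architecture provides its own implementation, so the
 * atomic_add_unless()-based fallback is not used.
 */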

#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
	cmpwi %0,1\n\
	addi %0,%0,-1\n\
	blt- 2f\n"
	PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
	bne- 1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:" : "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
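
/*
 * Illustrative usage sketch (not part of the original header): a
 * "take one token if any are left" helper could be written as
 *
 *	if (atomic_dec_if_positive(&count) < 0)
 *		return -EAGAIN;		// no token; count left untouched
 *
 * relying on the return value being the old value minus one even when
 * no decrement was performed.
 */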

#ifdef __powerpc64__

#define ATOMIC64_INIT(i) { (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long a, atomic64_t *v) \
{ \
	long t; \
\
	__asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
	#asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3 \n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline long \
atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
{ \
	long t; \
\
	__asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
	#asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3\n" \
" bne- 1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return t; \
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline long \
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
{ \
	long res, t; \
\
	__asm__ __volatile__( \
"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
	#asm_op " %1,%3,%0\n" \
" stdcx. %1,0,%4\n" \
" bne- 1b\n" \
	: "=&r" (res), "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return res; \
}

#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc\n\
	addic %0,%0,1\n\
	stdcx. %0,0,%2 \n\
	bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
" addic %0,%0,1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec\n\
	addic %0,%0,-1\n\
	stdcx. %0,0,%2\n\
	bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
" addic %0,%0,-1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
	addic. %0,%0,-1\n\
	blt- 2f\n\
	stdcx. %0,0,%1\n\
	bne- 1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:" : "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed (i.e. @v was not @u),
 * and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_add_unless\n\
	cmpd 0,%0,%3 \n\
	beq 2f \n\
	add %0,%2,%0 \n"
" stdcx. %0,0,%1 \n\
	bne- 1b \n"
	PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
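
/*
 * Illustrative note (not part of the original header): unlike the
 * 32-bit __atomic_add_unless() above, which hands back the old value
 * and is wrapped by the generic atomic_add_unless(), this 64-bit
 * routine already returns the "did the add happen" boolean itself.
 */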

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
	cmpdi 0,%0,0\n\
	beq- 2f\n\
	addic %1,%0,1\n\
	stdcx. %1,0,%2\n\
	bne- 1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}

#endif /* __powerpc64__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */