/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
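
/*
 * Illustrative sketch (not part of this header): a counter initialised
 * statically and then accessed with the helpers above.
 *
 *      static atomic_t nr_pending = ATOMIC_INIT(0);
 *
 *      atomic_set(&nr_pending, 1);
 *      if (atomic_read(&nr_pending) > 0)
 *              ...
 *
 * These are plain accesses; any ordering the caller needs must come from
 * explicit barriers or the ordered atomic ops below.
 */
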
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
\
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic_" #op "\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
}
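
/*
 * What the ldrex/strex loop above does, as a C-level sketch (illustrative
 * only; load_exclusive()/store_exclusive() are hypothetical stand-ins for
 * the ldrex/strex instructions, not real functions):
 *
 *      do {
 *              old = load_exclusive(&v->counter);
 *              new = old <op> i;
 *      } while (store_exclusive(&v->counter, new) != 0);
 *
 * The store fails (and the loop retries) if anything else wrote the
 * location between the exclusive load and the exclusive store.
 */
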
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic_" #op "_return\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
\
        return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result, val; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1: ldrex %0, [%4]\n" \
" " #asm_op " %1, %0, %5\n" \
" strex %2, %1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
\
        return result; \
}
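
/*
 * The difference between the two generated flavours, assuming
 * v->counter == 3 beforehand (illustrative example):
 *
 *      atomic_add_return_relaxed(2, &v)  ->  5  (new value)
 *      atomic_fetch_add_relaxed(2, &v)   ->  3  (old value)
 *
 * Both leave v->counter == 5. The _relaxed suffix means no implicit
 * barriers; fully ordered forms are derived from these generically.
 */
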
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex %1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "Ir" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
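
/*
 * Typical cmpxchg retry loop (illustrative sketch only; LIMIT is a
 * hypothetical bound used just for the example):
 *
 *      int old, new;
 *
 *      do {
 *              old = atomic_read(v);
 *              new = min(old + 1, LIMIT);
 *      } while (atomic_cmpxchg(v, old, new) != old);
 *
 * The loop only terminates once the update was applied to an unchanged
 * old value.
 */
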
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
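
/*
 * __atomic_add_unless() returns the old value, so the caller can tell
 * whether the add actually happened; it backs atomic_add_unless() and
 * atomic_inc_not_zero(). Illustrative sketch only (obj->refcnt is a
 * hypothetical field):
 *
 *      if (!atomic_inc_not_zero(&obj->refcnt))
 *              return NULL;    (object already dead, reference not taken)
 *
 * The surrounding smp_mb() calls give the successful case full barrier
 * semantics.
 */
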
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
\
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
\
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        val = v->counter; \
        raw_local_irq_restore(flags); \
\
        return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
\
        raw_local_irq_save(flags); \
        val = v->counter; \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
\
        return val; \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c;
}
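
/*
 * On pre-ARMv6 UP systems, "atomic" here simply means an ordinary
 * read-modify-write with interrupts disabled, conceptually (illustrative
 * sketch only):
 *
 *      raw_local_irq_save(flags);
 *      v->counter += i;
 *      raw_local_irq_restore(flags);
 *
 * This is only valid because SMP is ruled out by the #error above.
 */
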
#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_OP_RETURN(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
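
/*
 * For reference (on ARMv6+), ATOMIC_OPS(add, +=, add) above generated:
 *
 *      void atomic_add(int i, atomic_t *v);
 *      int  atomic_add_return_relaxed(int i, atomic_t *v);
 *      int  atomic_fetch_add_relaxed(int i, atomic_t *v);
 *
 * and likewise for sub; the bitwise ops (and, andnot, or, xor) get only
 * the void and fetch_ variants.
 */
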
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
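
/*
 * Example use of the derived helpers (illustrative sketch only; obj and
 * refcnt are hypothetical): the classic refcount pattern, where the last
 * put frees the object:
 *
 *      if (atomic_dec_and_test(&obj->refcnt))
 *              kfree(obj);
 *
 * atomic_dec_and_test() is true only for the transition to zero.
 */
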
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        __asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        long long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif
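
/*
 * Background note on the two variants above: with LPAE, ldrd/strd to a
 * naturally aligned 64-bit location are single-copy atomic, so plain
 * doubleword accesses suffice. Without LPAE, only the ldrexd/strexd pair
 * guarantees both halves are observed together, which is why the non-LPAE
 * atomic64_set() has to loop on the exclusive store.
 */
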
#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
\
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic64_" #op "\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
}

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
\
        return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline long long \
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
{ \
        long long result, val; \
        unsigned long tmp; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1: ldrexd %0, %H0, [%4]\n" \
" " #op1 " %Q1, %Q0, %Q5\n" \
" " #op2 " %R1, %R0, %R5\n" \
" strexd %2, %1, %H1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
\
        return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_OP_RETURN(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
        long long oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd %1, %H1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "teqeq %H1, %H4\n"
                "strexdeq %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
        long long result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
" strexd %1, %4, %H4, [%3]\n"
" teq %1, #0\n"
" bne 1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}
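
/*
 * Usage sketch for atomic64_dec_if_positive() (illustrative only; "budget"
 * is a hypothetical counter):
 *
 *      if (atomic64_dec_if_positive(&budget) < 0)
 *              return -EBUSY;  (nothing left; the counter was not changed)
 *
 * The return value is the decremented result; a negative return means the
 * store was skipped and the counter is unchanged.
 */
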
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        long long val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
" adds %Q0, %Q0, %Q6\n"
" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}
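
/*
 * Unlike the 32-bit __atomic_add_unless() above, atomic64_add_unless()
 * returns 1 if the addition was performed and 0 if v already equalled u.
 * Illustrative sketch only:
 *
 *      if (atomic64_add_unless(&v, 1LL, 0LL))
 *              ... (counter was non-zero and has now been incremented)
 *
 * This is exactly what the atomic64_inc_not_zero() helper below expands to.
 */
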
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif