/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/* Normal writes in our arch don't clear lock reservations */

static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
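
/*
 * Illustrative usage sketch, not part of the original header; the names
 * below are hypothetical.  A plain store doesn't clear another CPU's
 * load-locked reservation (per the comment above), so resetting a
 * counter must go through atomic_set() rather than a direct assignment:
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	static void example_reset(void)
 *	{
 *		atomic_set(&example_count, 0);
 *	}
 */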

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Atomically stores @new in *@v and returns the previous value.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
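
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * header): because xchg returns the previous contents, exactly one of
 * several racing callers sees the old nonzero value and thereby claims
 * the pending work.
 *
 *	static atomic_t example_pending = ATOMIC_INIT(0);
 *
 *	static bool example_claim_pending(void)
 *	{
 *		return atomic_xchg(&example_pending, 0) != 0;
 *	}
 */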

/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: desired old value to match
 * @new: new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * Apparently this is complicated for archs that don't support
 * the memw_locked like we do (or it's broken or whatever).
 *
 * Kind of the lynchpin of the rest of the generically defined routines.
 * Remember V2 had that bug with dotnew predicate set by memw_locked.
 *
 * "old" is "expected" old val, __oldval is actual old value
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
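
/*
 * Illustrative sketch of the usual retry loop layered on
 * atomic_cmpxchg(); the helper below is hypothetical, not part of this
 * header.  It bumps a counter only while it stays below a cap --
 * something a single fetch-and-add can't express:
 *
 *	static int example_inc_below(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < max) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *			if (seen == old)
 *				return 1;	// our update won
 *			old = seen;		// lost a race; retry
 *		}
 *		return 0;			// already at the cap
 *	}
 */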

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
	"1:	%0 = memw_locked(%1);\n"				\
	"	%0 = "#op "(%0,%2);\n"					\
	"	memw_locked(%1,P3)=%0;\n"				\
	"	if !P3 jump 1b;\n"					\
	: "=&r" (output)						\
	: "r" (&v->counter), "r" (i)					\
	: "memory", "p3"						\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
	"1:	%0 = memw_locked(%1);\n"				\
	"	%0 = "#op "(%0,%2);\n"					\
	"	memw_locked(%1,P3)=%0;\n"				\
	"	if !P3 jump 1b;\n"					\
	: "=&r" (output)						\
	: "r" (&v->counter), "r" (i)					\
	: "memory", "p3"						\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
	"1:	%0 = memw_locked(%2);\n"				\
	"	%1 = "#op "(%0,%3);\n"					\
	"	memw_locked(%2,P3)=%1;\n"				\
	"	if !P3 jump 1b;\n"					\
	: "=&r" (output), "=&r" (val)					\
	: "r" (&v->counter), "r" (i)					\
	: "memory", "p3"						\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
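
/*
 * For reference, the ATOMIC_OPS() expansions above generate
 * (signatures only; the bodies are the memw_locked loops from the
 * macros):
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int  atomic_add_return(int i, atomic_t *v);  // returns the new value
 *	static inline int  atomic_fetch_add(int i, atomic_t *v);   // returns the old value
 *
 * and the same set for sub; and, or and xor get only the void and
 * fetch_ variants.
 */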

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the value is equal to u
 *
 * Returns the old value.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if !p3 jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
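
/*
 * Illustrative sketch (hypothetical structure and field, not part of
 * this header): the classic use is "take a reference unless the object
 * is already dead", which atomic_inc_not_zero() below packages up.
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// count was already zero; don't resurrect
 */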

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
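
/*
 * Illustrative sketch (hypothetical names): the wrappers above supply
 * the usual reference-counting release idiom.
 *
 *	static void example_put(struct example *e)
 *	{
 *		if (atomic_dec_and_test(&e->refs))	// true when the count hits zero
 *			kfree(e);
 *	}
 */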

#endif