atomic.h

/*
 * Copyright (c) 2018 Richard Braun.
 * Copyright (c) 2017-2018 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Type-generic memory-model-aware atomic operations.
 *
 * For portability reasons, this interface restricts atomic operation
 * sizes to 32-bit and 64-bit.
 *
 * Some configurations may not support 64-bit operations. Check if the
 * ATOMIC_HAVE_64B_OPS macro is defined to find out.
 *
 * TODO Replace mentions of "memory barriers" throughout the code with
 * C11 memory model terminology.
 */

#ifndef KERN_ATOMIC_H
#define KERN_ATOMIC_H
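
/*
 * Usage sketch (illustrative only, not part of this interface, using a
 * hypothetical req_counter variable): code that needs a 64-bit atomic
 * counter can test ATOMIC_HAVE_64B_OPS and fall back to another
 * mechanism, such as a lock-protected counter, when it is undefined.
 *
 * #ifdef ATOMIC_HAVE_64B_OPS
 * static uint64_t req_counter;
 *
 * static uint64_t
 * req_counter_inc(void)
 * {
 *     return atomic_fetch_add(&req_counter, 1, ATOMIC_RELAXED);
 * }
 * #else
 *  (use a 32-bit counter, or protect a 64-bit counter with a lock)
 * #endif
 */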

#if !defined(CONFIG_SMP) && defined(CONFIG_LATOMIC_REPLACE_ATOMIC_ON_UP)
#define ATOMIC_USE_LATOMIC
#endif

#ifdef ATOMIC_USE_LATOMIC

#include <kern/latomic.h>

/*
 * Local atomic operations always provide 64-bit support, by using the
 * generic versions which disable interrupts as a last resort.
 */
#define ATOMIC_HAVE_64B_OPS

/*
 * Memory orders.
 *
 * XXX Consume ordering is currently aliased to acquire.
 */
#define ATOMIC_RELAXED LATOMIC_RELAXED
#define ATOMIC_CONSUME LATOMIC_ACQUIRE
#define ATOMIC_ACQUIRE LATOMIC_ACQUIRE
#define ATOMIC_RELEASE LATOMIC_RELEASE
#define ATOMIC_ACQ_REL LATOMIC_ACQ_REL
#define ATOMIC_SEQ_CST LATOMIC_SEQ_CST

#define atomic_load latomic_load
#define atomic_store latomic_store

#define atomic_swap latomic_swap
#define atomic_cas latomic_cas

#define atomic_fetch_add latomic_fetch_add
#define atomic_fetch_sub latomic_fetch_sub
#define atomic_fetch_and latomic_fetch_and
#define atomic_fetch_or latomic_fetch_or
#define atomic_fetch_xor latomic_fetch_xor

#define atomic_add latomic_add
#define atomic_sub latomic_sub
#define atomic_and latomic_and
#define atomic_or latomic_or
#define atomic_xor latomic_xor

#define atomic_fence latomic_fence

#else /* ATOMIC_USE_LATOMIC */

#include <assert.h>
#include <stdbool.h>

#include <kern/macros.h>

/*
 * Supported memory orders.
 */
#define ATOMIC_RELAXED __ATOMIC_RELAXED
#define ATOMIC_CONSUME __ATOMIC_CONSUME
#define ATOMIC_ACQUIRE __ATOMIC_ACQUIRE
#define ATOMIC_RELEASE __ATOMIC_RELEASE
#define ATOMIC_ACQ_REL __ATOMIC_ACQ_REL
#define ATOMIC_SEQ_CST __ATOMIC_SEQ_CST
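
/*
 * Usage sketch (illustrative only, not part of this interface, using
 * hypothetical data/ready variables): a release store paired with an
 * acquire load, so that a consumer observing ready != 0 also observes
 * the data written before the store.
 *
 * Producer:
 *     data = compute();
 *     atomic_store(&ready, 1, ATOMIC_RELEASE);
 *
 * Consumer:
 *     if (atomic_load(&ready, ATOMIC_ACQUIRE)) {
 *         consume(data);
 *     }
 */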

#include <kern/atomic_i.h>

#define atomic_load(ptr, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, load)(ptr, memorder)); \
MACRO_END

#define atomic_store(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    atomic_select(ptr, store)(ptr, val, memorder); \
MACRO_END

/*
 * For compare-and-swap, deviate a little from the standard, and only
 * return the value of the atomic object before the operation, leaving it
 * up to the user to determine whether the swap was actually performed,
 * by comparing that value with the expected one.
 *
 * Also, note that the memory order in case of failure is relaxed. This is
 * because atomic CAS is typically used in a loop. However, if a different
 * code path is taken on failure (rather than retrying), then the user
 * should be aware that a memory fence might be necessary.
 */
#define atomic_cas(ptr, oval, nval, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, cas)(ptr, oval, nval, memorder)); \
MACRO_END
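
/*
 * Usage sketch (illustrative only, not part of this interface, using a
 * hypothetical counter variable): a CAS retry loop, where success is
 * detected by comparing the returned previous value with the expected one.
 *
 * unsigned long prev, val;
 *
 * do {
 *     val = atomic_load(&counter, ATOMIC_RELAXED);
 *     prev = atomic_cas(&counter, val, val + 1, ATOMIC_ACQ_REL);
 * } while (prev != val);
 */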

#define atomic_swap(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, swap)(ptr, val, memorder)); \
MACRO_END

#define atomic_fetch_add(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_add)(ptr, val, memorder)); \
MACRO_END

#define atomic_fetch_sub(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_sub)(ptr, val, memorder)); \
MACRO_END

#define atomic_fetch_and(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_and)(ptr, val, memorder)); \
MACRO_END

#define atomic_fetch_or(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_or)(ptr, val, memorder)); \
MACRO_END

#define atomic_fetch_xor(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_xor)(ptr, val, memorder)); \
MACRO_END

#define atomic_add(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    atomic_select(ptr, add)(ptr, val, memorder); \
MACRO_END

#define atomic_sub(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    atomic_select(ptr, sub)(ptr, val, memorder); \
MACRO_END

#define atomic_and(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    atomic_select(ptr, and)(ptr, val, memorder); \
MACRO_END

#define atomic_or(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    atomic_select(ptr, or)(ptr, val, memorder); \
MACRO_END

#define atomic_xor(ptr, val, memorder) \
MACRO_BEGIN \
    assert(atomic_ptr_aligned(ptr)); \
    atomic_select(ptr, xor)(ptr, val, memorder); \
MACRO_END

#define atomic_fence(memorder) __atomic_thread_fence(memorder)

#endif /* ATOMIC_USE_LATOMIC */

#endif /* KERN_ATOMIC_H */