bitops.h

/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>

#ifndef smp_mb__before_clear_bit
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
#endif /* smp_mb__before_clear_bit */

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>

#if (BITS_PER_LONG == 64)
#define __AMO(op)	"amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op)	"amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif
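
/*
 * For reference: __AMO() builds the AMO mnemonic for the native word size
 * by string concatenation.  On a 64-bit kernel, __AMO(or) expands to
 * "amoor.d", and __AMO(or) ".aq" yields the mnemonic "amoor.d.aq".
 */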

#define __test_and_op_bit_ord(op, mod, nr, addr, ord)           \
({                                                              \
        unsigned long __res, __mask;                            \
        __mask = BIT_MASK(nr);                                  \
        __asm__ __volatile__ (                                  \
                __AMO(op) #ord " %0, %2, %1"                    \
                : "=r" (__res), "+A" (addr[BIT_WORD(nr)])       \
                : "r" (mod(__mask))                             \
                : "memory");                                    \
        ((__res & __mask) != 0);                                \
})
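
/*
 * For reference: the AMO applies <op> to the long word containing bit <nr>
 * and returns that word's old value in __res; the final expression then
 * tests the requested bit in the old value.  On a 64-bit kernel,
 * test_and_set_bit() therefore boils down to a single "amoor.d.aqrl".
 */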

#define __op_bit_ord(op, mod, nr, addr, ord)                    \
        __asm__ __volatile__ (                                  \
                __AMO(op) #ord " zero, %1, %0"                  \
                : "+A" (addr[BIT_WORD(nr)])                     \
                : "r" (mod(BIT_MASK(nr)))                       \
                : "memory");

#define __test_and_op_bit(op, mod, nr, addr)                    \
        __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)                             \
        __op_bit_ord(op, mod, nr, addr, )

/* Bitmask modifiers */
#define __NOP(x)	(x)
#define __NOT(x)	(~(x))
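
/*
 * For reference: __op_bit_ord() writes the AMO result to the zero register,
 * so the old word value is simply discarded; __test_and_op_bit() is fully
 * ordered (.aqrl) while __op_bit() uses a relaxed AMO.  The __NOP/__NOT
 * modifiers let one template cover all operations: set/change pass the mask
 * through unchanged, while clear becomes an atomic AND with ~BIT_MASK(nr).
 */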

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
        return __test_and_op_bit(or, __NOP, nr, addr);
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        return __test_and_op_bit(and, __NOT, nr, addr);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
        return __test_and_op_bit(xor, __NOP, nr, addr);
}
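
/*
 * Illustrative usage (not part of this header): the test_and_* helpers are
 * typically used to claim a flag exactly once, e.g.
 *
 *	if (!test_and_set_bit(MY_FLAG_BIT, &my_flags))
 *		do_one_time_setup();
 *
 * MY_FLAG_BIT, my_flags and do_one_time_setup() are placeholders for the
 * caller's own bit number, bitmap and setup routine.
 */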

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
        __op_bit(or, __NOP, nr, addr);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
        __op_bit(and, __NOT, nr, addr);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
        __op_bit(xor, __NOP, nr, addr);
}
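
/*
 * Illustrative usage (not part of this header): set_bit(), clear_bit() and
 * change_bit() are implemented with relaxed AMOs, so callers that need
 * ordering must add it explicitly, e.g.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_BUSY_BIT, &my_state);
 *	smp_mb__after_clear_bit();
 *
 * MY_BUSY_BIT and my_state are placeholders for the caller's own bit number
 * and bitmap.
 */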

/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(
        unsigned long nr, volatile unsigned long *addr)
{
        return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(
        unsigned long nr, volatile unsigned long *addr)
{
        __op_bit_ord(and, __NOT, nr, addr, .rl);
}
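
/*
 * Illustrative bit-lock pattern (not part of this header): spin with
 * test_and_set_bit_lock() to acquire, then release with clear_bit_unlock().
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &my_word);
 *
 * MY_LOCK_BIT and my_word are placeholders for the caller's own lock bit
 * and bitmap.
 */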

/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock, however it is not atomic.
 * It does provide release barrier semantics so it can be used to unlock
 * a bit lock, however it would only be used if no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static inline void __clear_bit_unlock(
        unsigned long nr, volatile unsigned long *addr)
{
        clear_bit_unlock(nr, addr);
}

#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_RISCV_BITOPS_H */