#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1: ldl_l %0,%3\n"
        " bis %0,%2,%0\n"
        " stl_c %0,%1\n"
        " beq %0,2f\n"
        ".subsection 2\n"
        "2: br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}
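
/*
 * Illustrative sketch (not part of the original header): how a bit
 * number maps onto the array behind 'addr'.  For nr = 70, the word
 * index is nr >> 5 = 2 and the bit within that word is nr & 31 = 6,
 * so set_bit(70, map) below ORs 1 << 6 into the third 32-bit word.
 * The bitmap and calls are hypothetical.
 */
#if 0   /* example only, never compiled */
static void example_set_bit(void)
{
        unsigned long map[2] = { 0, 0 };        /* 128-bit bitmap */

        set_bit(0, map);        /* word 0, bit 0 */
        set_bit(70, map);       /* word 2, bit 6 */
}
#endif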

/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m |= 1 << (nr & 31);
}

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1: ldl_l %0,%3\n"
        " bic %0,%2,%0\n"
        " stl_c %0,%1\n"
        " beq %0,2f\n"
        ".subsection 2\n"
        "2: br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
        smp_mb();
        clear_bit(nr, addr);
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
        smp_mb();
        __clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1: ldl_l %0,%3\n"
        " xor %0,%2,%0\n"
        " stl_c %0,%1\n"
        " beq %0,2f\n"
        ".subsection 2\n"
        "2: br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
#ifdef CONFIG_SMP
        " mb\n"
#endif
        "1: ldl_l %0,%4\n"
        " and %0,%3,%2\n"
        " bne %2,2f\n"
        " xor %0,%3,%0\n"
        " stl_c %0,%1\n"
        " beq %0,3f\n"
        "2:\n"
#ifdef CONFIG_SMP
        " mb\n"
#endif
        ".subsection 2\n"
        "3: br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1: ldl_l %0,%4\n"
        " and %0,%3,%2\n"
        " bne %2,2f\n"
        " xor %0,%3,%0\n"
        " stl_c %0,%1\n"
        " beq %0,3f\n"
        "2:\n"
#ifdef CONFIG_SMP
        " mb\n"
#endif
        ".subsection 2\n"
        "3: br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}
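
/*
 * Illustrative sketch (not part of the original header): the usual
 * pairing of the _lock/_unlock variants as a minimal bit lock.
 * test_and_set_bit_lock() acquires on success and clear_bit_unlock()
 * releases with the required barrier.  The lock word and bit number
 * here are hypothetical.
 */
#if 0   /* example only, never compiled */
static unsigned long example_lock_word;

static void example_bit_lock_section(void)
{
        /* spin until bit 0 is acquired */
        while (test_and_set_bit_lock(0, &example_lock_word))
                ;
        /* ... critical section ... */
        clear_bit_unlock(0, &example_lock_word);
}
#endif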

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old | mask;
        return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
#ifdef CONFIG_SMP
        " mb\n"
#endif
        "1: ldl_l %0,%4\n"
        " and %0,%3,%2\n"
        " beq %2,2f\n"
        " xor %0,%3,%0\n"
        " stl_c %0,%1\n"
        " beq %0,3f\n"
        "2:\n"
#ifdef CONFIG_SMP
        " mb\n"
#endif
        ".subsection 2\n"
        "3: br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old & ~mask;
        return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
#ifdef CONFIG_SMP
        " mb\n"
#endif
        "1: ldl_l %0,%4\n"
        " and %0,%3,%2\n"
        " xor %0,%3,%0\n"
        " stl_c %0,%1\n"
        " beq %0,3f\n"
#ifdef CONFIG_SMP
        " mb\n"
#endif
        ".subsection 2\n"
        "3: br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old ^ mask;
        return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
        return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
        unsigned long sum, x1, x2, x4;

        x = ~x & -~x;           /* set first 0 bit, clear others */
        x1 = x & 0xAA;
        x2 = x & 0xCC;
        x4 = x & 0xF0;
        sum = x2 ? 2 : 0;
        sum += (x4 != 0) * 4;
        sum += (x1 != 0);

        return sum;
}
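
/*
 * Worked example (illustrative, not part of the original header): for
 * x = 0x07 the first zero bit is bit 3.  ~x & -~x isolates it as 0x08;
 * then 0x08 & 0xCC != 0 adds 2, 0x08 & 0xF0 == 0 adds nothing, and
 * 0x08 & 0xAA != 0 adds 1, so ffz_b(0x07) = 3.
 */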

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
        /* Whee. EV67 can calculate it directly. */
        return __kernel_cttz(~word);
#else
        unsigned long bits, qofs, bofs;

        bits = __kernel_cmpbge(word, ~0UL);
        qofs = ffz_b(bits);
        bits = __kernel_extbl(word, qofs);
        bofs = ffz_b(bits);

        return qofs*8 + bofs;
#endif
}
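
/*
 * Worked example (illustrative, not part of the original header): for
 * word = 0x000000ffffffffffUL, __kernel_cmpbge(word, ~0UL) sets one
 * result bit per byte that is entirely 0xff, giving 0x1f; ffz_b(0x1f)
 * is 5, so byte 5 holds the first zero.  __kernel_extbl(word, 5) is
 * 0x00, whose ffz_b is 0, so ffz() returns 5*8 + 0 = 40.
 */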

/*
 * __ffs = Find First set bit in word. Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
        /* Whee. EV67 can calculate it directly. */
        return __kernel_cttz(word);
#else
        unsigned long bits, qofs, bofs;

        bits = __kernel_cmpbge(0, word);
        qofs = ffz_b(bits);
        bits = __kernel_extbl(word, qofs);
        bofs = ffz_b(~bits);

        return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */
static inline int ffs(int word)
{
        int result = __ffs(word) + 1;
        return word ? result : 0;
}
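
/*
 * Illustrative comparison (not part of the original header): for the
 * value 0x18, __ffs(0x18) = 3 (zero-based index of the lowest set
 * bit) while ffs(0x18) = 4 (one-based, libc convention); ffs(0) = 0,
 * whereas __ffs(0) and ffz(~0UL) are undefined.
 */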

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
        return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
        unsigned long t, a, r;

        t = __kernel_cmpbge (x, 0x0101010101010101UL);
        a = __flsm1_tab[t];
        t = __kernel_extbl (x, a);
        r = a*8 + __flsm1_tab[t] + (x != 0);

        return r;
}
#endif
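
/*
 * Worked example for the table-based path (illustrative, not part of
 * the original header): for x = 0x12345UL, __kernel_cmpbge(x,
 * 0x0101010101010101UL) flags the non-zero bytes as 0x07,
 * __flsm1_tab[0x07] = 2 selects byte 2, whose value 0x01 has its top
 * bit at index 0, so fls64() returns 2*8 + 0 + 1 = 17 (bit 16 is the
 * highest set bit).
 */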

static inline unsigned long __fls(unsigned long x)
{
        return fls64(x) - 1;
}

static inline int fls(int x)
{
        return fls64((unsigned int) x);
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
static inline unsigned long __arch_hweight64(unsigned long w)
{
        return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
        return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
        return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
        return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap. It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
        unsigned long b0, b1, ofs, tmp;

        b0 = b[0];
        b1 = b[1];
        ofs = (b0 ? 0 : 64);
        tmp = (b0 ? b0 : b1);

        return __ffs(tmp) + ofs;
}
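
/*
 * Illustrative sketch (not part of the original header): with
 * b[0] = 0 and b[1] = 0x10, the first word is empty, so the search
 * falls through to the second word and returns __ffs(0x10) + 64 =
 * 4 + 64 = 68.
 */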

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */