#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
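
/*
 * Illustrative usage sketch (not part of this header's API): nr indexes
 * the bitmap as a flat run of bits over 32-bit ints, matching the
 * comment above.  "map" is a hypothetical caller-owned array.
 *
 *	static unsigned long map[2];	// 128-bit bitmap
 *
 *	set_bit(0, map);		// LSB of map[0]
 *	set_bit(35, map);		// bit 3 of the second int (nr >> 5 == 1)
 *	set_bit(64, map);		// LSB of map[1]
 */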

/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}
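
/*
 * Illustrative sketch (hypothetical "lock_word"): the returned old bit
 * value lets the atomic _lock/_unlock pair act as a crude test-and-set
 * spinlock, while the non-atomic __ variant suits data already guarded
 * by a lock.
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		;			// spin until the old bit was 0
 *	... critical section ...
 *	clear_bit_unlock(0, &lock_word);
 */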

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
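
/*
 * Example (illustrative): test_bit() is a plain read, not an atomic
 * read-modify-write; test_bit(35, addr) loads int 1 (nr >> 5) and
 * tests bit 3 (nr & 31) of it.
 */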

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;

	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
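
/*
 * Worked example (illustrative): ffz_b(0x07).  ~x & -~x isolates the
 * lowest clear bit: ~0x07 ends in ...11111000, whose lowest set bit is
 * 0x08.  Then x1 = 0x08 & 0xAA = 0x08, x2 = 0x08 & 0xCC = 0x08,
 * x4 = 0x08 & 0xF0 = 0, so sum = 2 + 0 + 1 = 3: bit 3 is the first
 * zero bit of 0x07.
 */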

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
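
/*
 * Worked example (illustrative) for the pre-EV67 path: ffz(0xff).
 * cmpbge against ~0UL sets a result bit for each all-ones byte, so
 * bits = 0x01 and qofs = ffz_b(0x01) = 1 picks byte 1 as the first
 * byte containing a zero; extbl extracts that byte (0x00), ffz_b finds
 * its bit 0 clear, and the answer is 1*8 + 0 = 8.
 */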

/*
 * __ffs = Find First set bit in word. Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}
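
/*
 * Worked example (illustrative): __ffs(0x18) on pre-EV67 hardware.
 * cmpbge(0, word) flags the zero bytes, so ffz_b() of that locates the
 * first nonzero byte (byte 0 here); extbl pulls out 0x18, and
 * ffz_b(~0x18) = 3 gives the final answer 0*8 + 3 = 3.
 */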

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */
static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
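
/*
 * Example (illustrative): ffs() is 1-based and defined at zero, unlike
 * __ffs(): ffs(0) == 0, ffs(1) == 1 and ffs(0x10) == 5, whereas
 * __ffs(0x10) == 4 and __ffs(0) is undefined.
 */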

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(int x)
{
	return fls64((unsigned int) x);
}
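
/*
 * Example (illustrative): fls()/fls64() are 1-based from the top:
 * fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32 and
 * fls64(1UL << 40) == 41, so __fls(1UL << 40) == 40.
 */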

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif
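
/*
 * Example (illustrative): hweight is a population count, so
 * __arch_hweight64(0xf0f0UL) == 8 and __arch_hweight8(0xff) == 8,
 * whether computed by EV67's ctpop or by the generic fallback.
 */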

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap. It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}
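
/*
 * Worked example (illustrative): with b[0] == 0 and b[1] == 0x4, b0 is
 * zero so ofs = 64 and tmp = b[1]; __ffs(0x4) == 2, giving 66 -- the
 * first set bit lives in the second word of the bitmap.
 */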

#include <asm-generic/bitops/le.h>

#define ext2_set_bit_atomic(l, n, a)	test_and_set_bit(n, a)
#define ext2_clear_bit_atomic(l, n, a)	test_and_clear_bit(n, a)

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */