/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
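
/*
 * Illustrative usage sketch (the flag word and bit index below are
 * hypothetical, chosen only for the example):
 *
 *	static unsigned long pending_flags;
 *
 *	set_bit(0, &pending_flags);	atomically mark work as pending
 *
 * Because @nr is not limited to one word, a DECLARE_BITMAP()-style array of
 * unsigned longs may also be passed; set_bit() selects the right word via
 * nr >> SZLONG_LOG, as above.
 */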

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
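
/*
 * Illustrative usage sketch: because test_and_set_bit() returns the bit's
 * previous value and is fully ordered, it can arbitrate which caller claims
 * a resource.  The flag word, bit number and helpers below are hypothetical:
 *
 *	static unsigned long state;
 *
 *	if (!test_and_set_bit(0, &state))
 *		start_work();		bit was clear - this caller owns it
 *	else
 *		back_off();		already claimed by someone else
 */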

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
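
/*
 * Illustrative usage sketch: test_and_set_bit_lock() pairs with
 * clear_bit_unlock() above to build a simple bit-lock style critical
 * section.  The lock word and bit number below are hypothetical:
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		cpu_relax();			spin until the bit was clear
 *	... critical section, acquire-ordered after the set ...
 *	clear_bit_unlock(0, &lock_word);	release-ordered clear
 */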

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1 bit
 * in a word.  Undefined if no 1 bit exists, so callers should check for a
 * nonzero word first.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
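
/*
 * Worked example (illustrative values only): for word = 0x0000f000,
 * __fls() returns 15 (the highest set bit) and __ffs() returns 12 (the
 * lowest set bit, found via word & -word, which isolates it to 0x00001000).
 * Both are 0-based and undefined for word == 0.
 */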

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
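
/*
 * Worked example (illustrative values only): unlike __fls()/__ffs() above,
 * fls() and ffs() are 1-based and define the zero case.  For word = 0x90,
 * fls(0x90) = 8 and ffs(0x90) = 5, while fls(0) = ffs(0) = 0.
 */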

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */
  557. #endif /* _ASM_BITOPS_H */