/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>	/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: " __LL "%0, %1 # set_bit \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			" " __LL "%0, %1 # set_bit \n"
			" " __INS "%0, %3, %2, 1 \n"
			" " __SC "%0, %1 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			" .set "MIPS_ISA_ARCH_LEVEL" \n"
			" " __LL "%0, %1 # set_bit \n"
			" or %0, %2 \n"
			" " __SC "%0, %1 \n"
			" .set mips0 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
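
/*
 * Usage sketch (hypothetical names, illustrative only): @nr indexes into
 * an array of longs starting at @addr, so a small flag word is typically
 * declared as an unsigned long or a DECLARE_BITMAP() array.
 *
 *	static unsigned long my_dev_flags;
 *	#define MY_DEV_RUNNING	0
 *
 *	static void my_dev_start(void)
 *	{
 *		set_bit(MY_DEV_RUNNING, &my_dev_flags);
 *	}
 */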

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: " __LL "%0, %1 # clear_bit \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			" " __LL "%0, %1 # clear_bit \n"
			" " __INS "%0, $0, %2, 1 \n"
			" " __SC "%0, %1 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			" .set "MIPS_ISA_ARCH_LEVEL" \n"
			" " __LL "%0, %1 # clear_bit \n"
			" and %0, %2 \n"
			" " __SC "%0, %1 \n"
			" .set mips0 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: " __LL "%0, %1 # change_bit \n"
		" xor %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			" .set "MIPS_ISA_ARCH_LEVEL" \n"
			" " __LL "%0, %1 # change_bit \n"
			" xor %0, %2 \n"
			" " __SC "%0, %1 \n"
			" .set mips0 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
		" .set mips0 \n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			" .set "MIPS_ISA_ARCH_LEVEL" \n"
			" " __LL "%0, %1 # test_and_set_bit \n"
			" or %2, %0, %3 \n"
			" " __SC "%2, %1 \n"
			" .set mips0 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
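
/*
 * Usage sketch (hypothetical names): because the old value is returned
 * and a full barrier is implied, test_and_set_bit() can arbitrate a
 * one-shot action between CPUs.
 *
 *	static unsigned long my_state;
 *	#define MY_INIT_DONE	0
 *
 *	if (!test_and_set_bit(MY_INIT_DONE, &my_state))
 *		my_one_time_init();	(runs for the first caller only)
 */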

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
		" .set mips0 \n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			" .set "MIPS_ISA_ARCH_LEVEL" \n"
			" " __LL "%0, %1 # test_and_set_bit \n"
			" or %2, %0, %3 \n"
			" " __SC "%2, %1 \n"
			" .set mips0 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
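
/*
 * Usage sketch (hypothetical names): together with clear_bit_unlock()
 * above, this gives a minimal bit lock: acquire ordering on a successful
 * test_and_set_bit_lock(), release ordering on clear_bit_unlock().
 *
 *	static unsigned long my_lock_word;
 *	#define MY_LOCK_BIT	0
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
 */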

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: " __LL "%0, %1 # test_and_clear_bit \n"
		" or %2, %0, %3 \n"
		" xor %2, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
		" .set mips0 \n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			" " __LL "%0, %1 # test_and_clear_bit \n"
			" " __EXT "%2, %0, %3, 1 \n"
			" " __INS "%0, $0, %3, 1 \n"
			" " __SC "%0, %1 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			" .set "MIPS_ISA_ARCH_LEVEL" \n"
			" " __LL "%0, %1 # test_and_clear_bit \n"
			" or %2, %0, %3 \n"
			" xor %2, %3 \n"
			" " __SC "%2, %1 \n"
			" .set mips0 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
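
/*
 * Usage sketch (hypothetical names): a common pattern is consuming a
 * "work pending" flag exactly once, since reading the old value and
 * clearing the bit happen atomically.
 *
 *	if (test_and_clear_bit(MY_WORK_PENDING, &my_state))
 *		my_do_work();
 */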

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: " __LL "%0, %1 # test_and_change_bit \n"
		" xor %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
		" .set mips0 \n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			" .set "MIPS_ISA_ARCH_LEVEL" \n"
			" " __LL "%0, %1 # test_and_change_bit \n"
			" xor %2, %0, %3 \n"
			" " __SC "\t%2, %1 \n"
			" .set mips0 \n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		" .set push \n"
		" .set "MIPS_ISA_LEVEL" \n"
		" clz %0, %1 \n"
		" .set pop \n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		" .set push \n"
		" .set "MIPS_ISA_LEVEL" \n"
		" dclz %0, %1 \n"
		" .set pop \n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
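
/*
 * Worked example of the software fallback above, assuming a 64-bit long
 * and word = 0x90 (bits 7 and 4 set): the successive tests shift the word
 * left by 32, 16 and 8 while dropping num from 63 to 31, 15 and finally 7;
 * the remaining tests fail because bit 63 is then set, so __fls(0x90)
 * returns 7, the position of the most significant 1 bit.
 */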

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		" .set push \n"
		" .set "MIPS_ISA_LEVEL" \n"
		" clz %0, %1 \n"
		" .set pop \n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
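
/*
 * Worked example relating the three helpers, assuming word = 0x90:
 * word & -word isolates the lowest set bit (0x10), so __ffs(0x90) is
 * __fls(0x10) = 4 (zero-based), ffs(0x90) is fls(0x10) = 5 (one-based,
 * matching the libc convention), and fls(0x90) itself is 8.
 */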

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */