#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Bit access functions vary across the ColdFire and 68k families.
 * So we will break them out here, and then macro in the ones we want.
 *
 * ColdFire - supports standard bset/bclr/bchg with register operand only
 * 68000    - supports standard bset/bclr/bchg with memory operand
 * >= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 * Although it is possible to use only the bset/bclr/bchg with register
 * operands on all platforms you end up with larger generated code.
 * So we use the best form possible on a given platform.
 */
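
/*
 * A note on the addressing used by the helpers below: the single-bit
 * instructions (bset/bclr/bchg) operate on a byte, so "(nr ^ 31) / 8"
 * picks the byte that holds bit "nr" of a big-endian 32-bit longword
 * (bit 0 lives in the last byte of the word) and "nr & 7" is the bit
 * within that byte.  The bit-field instructions (bfset/bfclr/bfchg)
 * instead take a field offset counted from the most significant bit of
 * the longword, which is why they are passed "nr ^ 31".
 */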
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif

#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)

static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)

static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)

static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
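
/*
 * Illustrative usage (the bitmap "map" is hypothetical; DECLARE_BITMAP()
 * comes from <linux/types.h>, which callers pick up via <linux/bitops.h>):
 *
 *	DECLARE_BITMAP(map, 64);
 *
 *	set_bit(0, map);	atomically set bit 0
 *	if (test_bit(0, map))	plain read of bit 0
 *		clear_bit(0, map);
 *	change_bit(1, map);	atomically toggle bit 1
 */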
static inline int bset_reg_test_and_set_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bset_mem_test_and_set_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
		volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)

static inline int bclr_reg_test_and_clear_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
		volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)

static inline int bchg_reg_test_and_change_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
		volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

#define __test_and_change_bit(nr, vaddr)	test_and_change_bit(nr, vaddr)
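
/*
 * Each test_and_* operation returns the previous value of the bit
 * (non-zero if it was already set), so a typical caller looks like
 * (illustrative only, "map" as above):
 *
 *	if (!test_and_set_bit(0, map))
 *		... the bit was clear and this caller just set it ...
 */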

/*
 * The true 68020 and more advanced processors support the "bfffo"
 * instruction for finding bits. ColdFire and simple 68000 parts
 * (including CPU32) do not support this. They simply use the generic
 * functions.
 */
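
/*
 * bfffo scans a bit field starting at its most significant bit and
 * returns the offset of the first set bit it finds (or the full field
 * width, 32 here, when no bit is set).  The routines below therefore
 * isolate the bit of interest first ("num & -num" keeps only the lowest
 * set bit) and convert the MSB-relative offset back to an ordinary bit
 * number with "res ^ 31".
 */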

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/ffz.h>
#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
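
/*
 * "~word & -~word" reduces word to a single set bit at the position of
 * its lowest clear bit, e.g. ffz(0x000000ff) == 8.  For word == ~0UL the
 * operand is 0 and the result is meaningless, hence the warning above.
 */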
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}

#endif

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 * The newer ColdFire family members support a "bitrev" instruction
 * and we can use that to implement a fast ffs. Older ColdFire parts,
 * and normal 68000 parts don't have anything special, so we use the
 * generic functions for those.
 */
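
/*
 * "bitrev" reverses the 32 bits of the register, so the lowest set bit
 * of x becomes the highest, and "ff1" then reports the offset of that
 * bit from the MSB, i.e. the index of the original lowest set bit.  For
 * x == 0 ff1 yields 32, which is why ffs() below tests for zero first.
 */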
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)

static inline unsigned long __ffs(unsigned long x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}

static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
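
/*
 * "x & -x" isolates the lowest set bit of x, bfffo reports its offset
 * from the most significant bit (32 if x is zero), and "32 - cnt" turns
 * that into the 1-based index ffs() must return, with ffs(0) == 0.
 */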
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}

static inline unsigned long __ffs(unsigned long x)
{
	return ffs(x) - 1;
}

/*
 * fls: find last bit set.
 */
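
/*
 * Here bfffo operates on x itself, so cnt is the number of leading zero
 * bits and "32 - cnt" is the 1-based position of the highest set bit;
 * fls(0) == 0 falls out of bfffo returning 32 for an all-zero field.
 */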
static inline int fls(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}

static inline int __fls(int x)
{
	return fls(x) - 1;
}

#endif

/* Simple test-and-set bit locks */
#define test_and_set_bit_lock	test_and_set_bit
#define clear_bit_unlock	clear_bit
#define __clear_bit_unlock	clear_bit_unlock

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */