  1. /*
  2. * linux/fs/hfsplus/bitmap.c
  3. *
  4. * Copyright (C) 2001
  5. * Brad Boyer (flar@allandria.com)
  6. * (C) 2003 Ardis Technologies <roman@ardistech.com>
  7. *
  8. * Handling of allocation file
  9. */
  10. #include <linux/pagemap.h>
  11. #include "hfsplus_fs.h"
  12. #include "hfsplus_raw.h"
  13. #define PAGE_CACHE_BITS (PAGE_SIZE * 8)
  14. int hfsplus_block_allocate(struct super_block *sb, u32 size,
  15. u32 offset, u32 *max)
  16. {
  17. struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
  18. struct page *page;
  19. struct address_space *mapping;
  20. __be32 *pptr, *curr, *end;
  21. u32 mask, start, len, n;
  22. __be32 val;
  23. int i;
  24. len = *max;
  25. if (!len)
  26. return size;
  27. hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
  28. mutex_lock(&sbi->alloc_mutex);
  29. mapping = sbi->alloc_file->i_mapping;
  30. page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
  31. if (IS_ERR(page)) {
  32. start = size;
  33. goto out;
  34. }
  35. pptr = kmap(page);
  36. curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
  37. i = offset % 32;
  38. offset &= ~(PAGE_CACHE_BITS - 1);
  39. if ((size ^ offset) / PAGE_CACHE_BITS)
  40. end = pptr + PAGE_CACHE_BITS / 32;
  41. else
  42. end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
  43. /* scan the first partial u32 for zero bits */
  44. val = *curr;
  45. if (~val) {
  46. n = be32_to_cpu(val);
  47. mask = (1U << 31) >> i;
  48. for (; i < 32; mask >>= 1, i++) {
  49. if (!(n & mask))
  50. goto found;
  51. }
  52. }
  53. curr++;
  54. /* scan complete u32s for the first zero bit */
  55. while (1) {
  56. while (curr < end) {
  57. val = *curr;
  58. if (~val) {
  59. n = be32_to_cpu(val);
  60. mask = 1 << 31;
  61. for (i = 0; i < 32; mask >>= 1, i++) {
  62. if (!(n & mask))
  63. goto found;
  64. }
  65. }
  66. curr++;
  67. }
  68. kunmap(page);
  69. offset += PAGE_CACHE_BITS;
  70. if (offset >= size)
  71. break;
  72. page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
  73. NULL);
  74. if (IS_ERR(page)) {
  75. start = size;
  76. goto out;
  77. }
  78. curr = pptr = kmap(page);
  79. if ((size ^ offset) / PAGE_CACHE_BITS)
  80. end = pptr + PAGE_CACHE_BITS / 32;
  81. else
  82. end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
  83. }
  84. hfs_dbg(BITMAP, "bitmap full\n");
  85. start = size;
  86. goto out;
  87. found:
  88. start = offset + (curr - pptr) * 32 + i;
  89. if (start >= size) {
  90. hfs_dbg(BITMAP, "bitmap full\n");
  91. goto out;
  92. }
  93. /* do any partial u32 at the start */
  94. len = min(size - start, len);
  95. while (1) {
  96. n |= mask;
  97. if (++i >= 32)
  98. break;
  99. mask >>= 1;
  100. if (!--len || n & mask)
  101. goto done;
  102. }
  103. if (!--len)
  104. goto done;
  105. *curr++ = cpu_to_be32(n);
  106. /* do full u32s */
  107. while (1) {
  108. while (curr < end) {
  109. n = be32_to_cpu(*curr);
  110. if (len < 32)
  111. goto last;
  112. if (n) {
  113. len = 32;
  114. goto last;
  115. }
  116. *curr++ = cpu_to_be32(0xffffffff);
  117. len -= 32;
  118. }
  119. set_page_dirty(page);
  120. kunmap(page);
  121. offset += PAGE_CACHE_BITS;
  122. page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
  123. NULL);
  124. if (IS_ERR(page)) {
  125. start = size;
  126. goto out;
  127. }
  128. pptr = kmap(page);
  129. curr = pptr;
  130. end = pptr + PAGE_CACHE_BITS / 32;
  131. }
  132. last:
  133. /* do any partial u32 at end */
  134. mask = 1U << 31;
  135. for (i = 0; i < len; i++) {
  136. if (n & mask)
  137. break;
  138. n |= mask;
  139. mask >>= 1;
  140. }
  141. done:
  142. *curr = cpu_to_be32(n);
  143. set_page_dirty(page);
  144. kunmap(page);
  145. *max = offset + (curr - pptr) * 32 + i - start;
  146. sbi->free_blocks -= *max;
  147. hfsplus_mark_mdb_dirty(sb);
  148. hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
  149. out:
  150. mutex_unlock(&sbi->alloc_mutex);
  151. return start;
  152. }
/*
 * hfsplus_block_free - return a run of blocks to the free pool
 * @sb:     superblock of the volume
 * @offset: first block of the run to free
 * @count:  number of blocks to free
 *
 * Clears @count consecutive bits starting at @offset in the allocation
 * file's bitmap, crossing page boundaries as needed, and credits the
 * blocks back to sbi->free_blocks.  The bitmap is big-endian: the
 * lowest-numbered block within each 32-bit word is the most significant
 * bit.
 *
 * Returns 0 on success, -ENOENT if the range lies beyond the volume, or
 * -EIO if a bitmap page could not be read.
 */
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	/* remember the full count; `count` is consumed below */
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		/* j = bits from position i to the end of this word */
		int j = 32 - i;
		/* keep the i high-order (already-preceding) bits set */
		mask = 0xffffffffU << j;
		if (j > count) {
			/* run ends inside this word: also keep the tail bits */
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		/* run continues into the next bitmap page */
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		/* clear the top `count` bits, keep the rest */
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);
	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);
	return -EIO;
}
  227. }