/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"
  12. secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
  13. {
  14. unsigned i;
  15. struct hpfs_sb_info *sbi = hpfs_sb(s);
  16. for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
  17. if (sbi->hotfix_from[i] == sec) {
  18. return sbi->hotfix_to[i];
  19. }
  20. }
  21. return sec;
  22. }
  23. unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
  24. {
  25. unsigned i;
  26. struct hpfs_sb_info *sbi = hpfs_sb(s);
  27. for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
  28. if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
  29. n = sbi->hotfix_from[i] - sec;
  30. }
  31. }
  32. return n;
  33. }
  34. void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
  35. {
  36. struct buffer_head *bh;
  37. struct blk_plug plug;
  38. if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
  39. return;
  40. if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
  41. return;
  42. bh = sb_find_get_block(s, secno);
  43. if (bh) {
  44. if (buffer_uptodate(bh)) {
  45. brelse(bh);
  46. return;
  47. }
  48. brelse(bh);
  49. };
  50. blk_start_plug(&plug);
  51. while (n > 0) {
  52. if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
  53. break;
  54. sb_breadahead(s, secno);
  55. secno++;
  56. n--;
  57. }
  58. blk_finish_plug(&plug);
  59. }
  60. /* Map a sector into a buffer and return pointers to it and to the buffer. */
  61. void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
  62. int ahead)
  63. {
  64. struct buffer_head *bh;
  65. hpfs_lock_assert(s);
  66. hpfs_prefetch_sectors(s, secno, ahead);
  67. cond_resched();
  68. *bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
  69. if (bh != NULL)
  70. return bh->b_data;
  71. else {
  72. pr_err("%s(): read error\n", __func__);
  73. return NULL;
  74. }
  75. }
  76. /* Like hpfs_map_sector but don't read anything */
  77. void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
  78. {
  79. struct buffer_head *bh;
  80. /*return hpfs_map_sector(s, secno, bhp, 0);*/
  81. hpfs_lock_assert(s);
  82. cond_resched();
  83. if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
  84. if (!buffer_uptodate(bh)) wait_on_buffer(bh);
  85. set_buffer_uptodate(bh);
  86. return bh->b_data;
  87. } else {
  88. pr_err("%s(): getblk failed\n", __func__);
  89. return NULL;
  90. }
  91. }
  92. /* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. */
  93. void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
  94. int ahead)
  95. {
  96. char *data;
  97. hpfs_lock_assert(s);
  98. cond_resched();
  99. if (secno & 3) {
  100. pr_err("%s(): unaligned read\n", __func__);
  101. return NULL;
  102. }
  103. hpfs_prefetch_sectors(s, secno, 4 + ahead);
  104. if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
  105. if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
  106. if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
  107. if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;
  108. if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
  109. likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
  110. likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
  111. return qbh->data = qbh->bh[0]->b_data;
  112. }
  113. qbh->data = data = kmalloc(2048, GFP_NOFS);
  114. if (!data) {
  115. pr_err("%s(): out of memory\n", __func__);
  116. goto bail4;
  117. }
  118. memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
  119. memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
  120. memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
  121. memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);
  122. return data;
  123. bail4:
  124. brelse(qbh->bh[3]);
  125. bail3:
  126. brelse(qbh->bh[2]);
  127. bail2:
  128. brelse(qbh->bh[1]);
  129. bail1:
  130. brelse(qbh->bh[0]);
  131. bail0:
  132. return NULL;
  133. }
  134. /* Don't read sectors */
  135. void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
  136. struct quad_buffer_head *qbh)
  137. {
  138. cond_resched();
  139. hpfs_lock_assert(s);
  140. if (secno & 3) {
  141. pr_err("%s(): unaligned read\n", __func__);
  142. return NULL;
  143. }
  144. if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
  145. if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
  146. if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
  147. if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;
  148. if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
  149. likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
  150. likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
  151. return qbh->data = qbh->bh[0]->b_data;
  152. }
  153. if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
  154. pr_err("%s(): out of memory\n", __func__);
  155. goto bail4;
  156. }
  157. return qbh->data;
  158. bail4:
  159. brelse(qbh->bh[3]);
  160. bail3:
  161. brelse(qbh->bh[2]);
  162. bail2:
  163. brelse(qbh->bh[1]);
  164. bail1:
  165. brelse(qbh->bh[0]);
  166. bail0:
  167. return NULL;
  168. }
  169. void hpfs_brelse4(struct quad_buffer_head *qbh)
  170. {
  171. if (unlikely(qbh->data != qbh->bh[0]->b_data))
  172. kfree(qbh->data);
  173. brelse(qbh->bh[0]);
  174. brelse(qbh->bh[1]);
  175. brelse(qbh->bh[2]);
  176. brelse(qbh->bh[3]);
  177. }
  178. void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
  179. {
  180. if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
  181. memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
  182. memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
  183. memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
  184. memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
  185. }
  186. mark_buffer_dirty(qbh->bh[0]);
  187. mark_buffer_dirty(qbh->bh[1]);
  188. mark_buffer_dirty(qbh->bh[2]);
  189. mark_buffer_dirty(qbh->bh[3]);
  190. }