writeback.h

#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
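
/*
 * Cutoffs for should_writeback(), as a percentage of the cache in use
 * (gc_stats.in_use): past CUTOFF_WRITEBACK only REQ_SYNC writes are
 * still written back, and past CUTOFF_WRITEBACK_SYNC none are.
 */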
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70
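
/*
 * Total dirty sectors on a device: dirty counts are kept per stripe,
 * so sum the per-stripe atomic counters.
 */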
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
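
/*
 * Map a sector offset to the index of the stripe containing it.
 * do_div() divides offset in place, so the 64-bit division also
 * works on 32-bit architectures.
 */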
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}
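
/*
 * Return true if any stripe covered by [offset, offset + nr_sectors)
 * already contains dirty sectors.
 */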
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
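
/*
 * Decide whether a write should be cached in writeback mode. Writeback
 * is refused unless the device is in writeback mode, and never while it
 * is detaching or more than CUTOFF_WRITEBACK_SYNC percent of the cache
 * is in use. Writes touching an already-dirty stripe are always taken
 * when partial stripe writes are expensive (e.g. a RAID backing
 * device). Otherwise writes the caller would skip the cache for are
 * refused, and the rest are written back only if they are REQ_SYNC or
 * no more than CUTOFF_WRITEBACK percent of the cache is in use.
 *
 * A rough usage sketch (hypothetical caller, modeled on the write path
 * in request.c):
 *
 *	if (should_writeback(dc, bio, BDEV_CACHE_MODE(&dc->sb), would_skip))
 *		... insert dirty keys, complete the write from the cache ...
 *	else
 *		... write through/around to the backing device ...
 */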
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}
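
/* Kick the per-device writeback thread to start flushing dirty data. */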
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	wake_up_process(dc->writeback_thread);
}
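
/*
 * Note that the device now holds dirty data. Only the first caller to
 * flip has_dirty does the real work: it takes a reference, marks the
 * backing superblock BDEV_STATE_DIRTY so the state survives a crash,
 * and wakes the writeback thread. The plain atomic_read() before the
 * atomic_xchg() keeps the fast path from bouncing the cache line once
 * has_dirty is already set.
 */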
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}
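
/* Implemented in writeback.c: */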
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif