/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

#define CUTOFF_WRITEBACK		40
#define CUTOFF_WRITEBACK_SYNC		70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90

#define MAX_WRITEBACKS_IN_PASS		5
#define MAX_WRITESIZE_IN_PASS		5000	/* *512b */

#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

#define BCH_AUTO_GC_DIRTY_THRESHOLD	50

/*
 * 14 (16384ths) is chosen here as something that each backing device
 * should be a reasonable fraction of the share, and not to blow up
 * until individual backing devices are a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14
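
/*
 * Illustrative arithmetic (not from the original source): 2^14 = 16384,
 * so a backing device's share is presumably expressed in 16384ths of
 * the total. A 1 PiB backing device is 2^41 512-byte sectors, and
 * 2^41 << 14 = 2^55 still fits comfortably in 64-bit math, which is why
 * the comment above says the numbers do not blow up until individual
 * backing devices reach a petabyte.
 */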

static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
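
/*
 * Convert a sector offset on the bcache device into a stripe index.
 * do_div() divides @offset by d->stripe_size in place, leaving the
 * stripe index in @offset. Returns -EINVAL for an out-of-range stripe,
 * so callers must check for a negative result before using the index.
 */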
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
			offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * Here offset is definitely smaller than INT_MAX,
	 * so returning it as an int will never overflow.
	 */
	return offset;
}
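
/*
 * Return true if any stripe overlapped by [offset, offset + nr_sectors)
 * already holds dirty sectors. should_writeback() below uses this to
 * keep writes into partially dirty stripes in writeback mode when
 * partial stripe writes are expensive on the backing device.
 */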
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
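
/*
 * Writeback cutoff thresholds, as a percentage of cache in use;
 * tunable at module load and, it appears, bounded by the
 * CUTOFF_WRITEBACK_MAX / CUTOFF_WRITEBACK_SYNC_MAX values above.
 */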
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;
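
/*
 * Decide whether a write should be cached in writeback mode. Writeback
 * is refused when the device is not in writeback mode, is detaching, or
 * the cache is more than bch_cutoff_writeback_sync percent full, and
 * discards are never written back. Writes touching already-dirty
 * stripes are accepted even when the request would otherwise be
 * skipped; beyond that, only sync/metadata/priority requests, or any
 * write while cache usage is at most bch_cutoff_writeback percent, go
 * through writeback.
 */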
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}
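
/* Out-of-line writeback interfaces, implemented in writeback.c. */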
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif