/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
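
/*
 * Worked example (hypothetical numbers, not from any particular device):
 * with nr_rnd = 100 random zones and nr_unmap_rnd = 20 free ones,
 * p_unmap_rnd = 20%. Since 20 is at or below DMZ_RECLAIM_LOW_UNMAP_RND
 * (30), reclaim runs even while the target is busy. An idle target is
 * reclaimed whenever any random zone is mapped, regardless of these
 * percentages (see dmz_should_reclaim() below).
 */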

/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(zrc->dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
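
/*
 * Note: on host-managed zoned block devices, writes to a sequential zone
 * are only accepted at the current write pointer position. Writing zeroed
 * blocks over the gap is thus what allows the copy destination to advance
 * to an arbitrary block inside the zone.
 */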

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
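
/*
 * Note: this completion callback pairs with the waiter in
 * dmz_reclaim_copy(), which sets DMZ_RECLAIM_KCOPY before issuing the
 * copy and then sleeps in wait_on_bit_io() on the same bit. The
 * clear_bit_unlock() / smp_mb__after_atomic() / wake_up_bit() sequence
 * above is the standard kernel bit-wait handshake: it guarantees that
 * kc_err is visible to the waiter before it is woken up.
 */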

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential. So zero out any hole
		 * between writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}

/*
 * Move valid blocks of dzone buffer zone into dzone (after its write pointer)
 * and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Copy the buffer zone's valid blocks into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Flush the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (IS_ERR(dzone))
		return PTR_ERR(dzone);

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		dmz_dev_debug(zrc->dev,
			      "Metadata flush for zone %u failed, err %d\n",
			      dmz_id(zmd, rzone), ret);
		return ret;
	}

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));

	return 0;
}
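
/*
 * Summary of the cases handled by dmz_do_reclaim() above:
 *
 *  - random zone with no valid blocks: unmap and free it
 *    (dmz_reclaim_empty);
 *  - random zone with valid blocks: copy them to a free sequential zone
 *    and remap the chunk there (dmz_reclaim_rnd_data);
 *  - sequential zone whose buffer zone is empty or only holds valid
 *    blocks at or after the write pointer: fold the buffer zone into the
 *    data zone (dmz_reclaim_buf);
 *  - sequential zone with buffered writes below its write pointer: merge
 *    the data zone into the buffer zone (dmz_reclaim_seq_data).
 */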

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
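
/*
 * Note: time_is_before_jiffies(t) is true once jiffies has advanced past
 * t, so the target counts as idle when at least DMZ_IDLE_PERIOD (10
 * seconds) has elapsed since the last BIO recorded by
 * dmz_reclaim_bio_acc().
 */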

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_bdev_is_dying(zrc->dev))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast when we are very low on free random zones,
	 * and slower when some free random zones remain, to limit the
	 * impact on the user workload as much as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
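
	/*
	 * Worked example (hypothetical numbers): at p_unmap_rnd = 20%, the
	 * branch above gives min(75, 100 - 20/2) = 75, capping kcopyd at
	 * 75% of the available bandwidth; at p_unmap_rnd = 10% (below
	 * DMZ_RECLAIM_LOW_UNMAP_RND / 2 = 15), reclaim runs unthrottled.
	 */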

	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
		if (!dmz_check_bdev(zrc->dev))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}
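
/*
 * Usage sketch (simplified; the real call sites live in the dm-zoned
 * target code, and the exact target hooks shown here are an assumption):
 *
 *	struct dmz_reclaim *zrc;
 *
 *	ret = dmz_ctr_reclaim(dev, zmd, &zrc);	// target constructor
 *	...
 *	dmz_reclaim_bio_acc(zrc);		// per user BIO, in ->map()
 *	...
 *	dmz_suspend_reclaim(zrc);		// ->postsuspend()
 *	dmz_resume_reclaim(zrc);		// ->resume()
 *	dmz_dtr_reclaim(zrc);			// target destructor
 */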