dm-zoned-reclaim.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)
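
/*
 * Note: HZ is the number of timer ticks (jiffies) per second, so this
 * is always 10 seconds of inactivity whatever the tick rate, e.g.
 * 2500 jiffies with HZ == 250.
 */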

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
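
/*
 * Example: on a target with 100 random zones, a busy target is still
 * reclaimed while 30 or fewer of them are unmapped, and is left alone
 * once at least 50 are unmapped; an idle target is reclaimed whenever
 * any random zone is still mapped (see dmz_should_reclaim()).
 */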

/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(zrc->dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	/* Order the bit clear before the wake up of the waiter */
	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();

	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
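
/*
 * Note: dmz_reclaim_copy() below keeps at most one kcopyd copy in
 * flight, using the DMZ_RECLAIM_KCOPY bit in zrc->flags as a completion
 * flag and zrc->kc_err as the completion status, so no further locking
 * of those fields is needed.
 */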

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential. So zero out any hole between
		 * the write pointer and the next valid region.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
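
/*
 * Note: for a sequential destination, the loop above keeps
 * dst_zone->wp_block in sync with the device write pointer, either by
 * zeroing holes through dmz_reclaim_align_wp() or by advancing it past
 * each completed copy.
 */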

/*
 * Move the valid blocks of dzone's buffer zone into dzone (after its write
 * pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Flush the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}
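
/*
 * Note on the dispatch below: an empty random zone is simply unmapped
 * and freed (dmz_reclaim_empty), a non-empty random zone is moved to a
 * free sequential zone (dmz_reclaim_rnd_data), and a buffered sequential
 * zone is reclaimed either by folding the buffer zone back into it
 * (dmz_reclaim_buf) or by merging the data zone into its buffer zone
 * (dmz_reclaim_seq_data), depending on where the valid blocks sit.
 */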

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (!dzone)
		return -EBUSY;

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;

	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		dmz_dev_debug(zrc->dev,
			      "Metadata flush for zone %u failed, err %d",
			      dmz_id(zmd, rzone), ret);
		return ret;
	}

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
	return 0;
}

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	/* True if no BIO hit the target for at least DMZ_IDLE_PERIOD */
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_bdev_is_dying(zrc->dev))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on random zones, and
	 * slower if some free random zones are still available, so as to
	 * limit the impact on the user workload as much as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}

	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		dmz_dev_debug(zrc->dev, "Reclaim error %d", ret);
		if (!dmz_check_bdev(zrc->dev))
			return;
	}

	dmz_schedule_reclaim(zrc);
}
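
/*
 * A worked instance of the throttle policy above: with p_unmap_rnd == 20
 * on a busy target, the throttle is min(75U, 100U - 20 / 2) == 75. And
 * since dmz_should_reclaim() only lets a busy target reclaim while
 * p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND, 100 - p_unmap_rnd / 2 is at
 * least 85, so the busy throttle effectively always settles at 75%.
 */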

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}