dm-zoned-reclaim.c

/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct	*wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
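
/*
 * Example (illustrative numbers): with 100 random zones, a busy target
 * starts reclaiming once no more than 30 of them are unmapped and backs
 * off again once at least 50 are unmapped. An idle target reclaims
 * whenever at least one random zone is still mapped.
 */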

/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(zrc->dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
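
/*
 * Example (illustrative numbers): if the destination zone's write pointer
 * is at block 100 and the next valid block to copy lands at block 160,
 * the 60 blocks in between are zeroed first so that the zone is still
 * written strictly sequentially.
 */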

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
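
/*
 * Note: the clear_bit_unlock() / smp_mb__after_atomic() / wake_up_bit()
 * sequence above is the standard counterpart to wait_on_bit_io(): the
 * barrier orders the kc_err store and the bit clear against the wakeup,
 * so the waiter in dmz_reclaim_copy() cannot miss the completion.
 */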

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential. So zero out any hole
		 * between writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		ret = dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
				     dmz_reclaim_kcopy_end, zrc);
		if (ret)
			return ret;

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
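
/*
 * Note: the copy loop above is synchronous: each extent of valid blocks
 * is submitted as one dm_kcopyd_copy() and the worker sleeps in
 * wait_on_bit_io() until dmz_reclaim_kcopy_end() signals completion
 * before moving on to the next extent.
 */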

/*
 * Move the valid blocks of dzone's buffer zone into dzone (after its
 * write pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential
 * zone. Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}
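
/*
 * Note: dmz_do_reclaim() below picks one of three strategies for the zone
 * it is handed: an empty random zone is simply unmapped and freed; a
 * non-empty random zone has its valid blocks moved to a free sequential
 * zone; a buffered sequential zone is either completed from its buffer
 * zone (dmz_reclaim_buf) or merged into it (dmz_reclaim_seq_data) so
 * that one of the two zones can be freed.
 */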

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (!dzone)
		return -EBUSY;

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		dmz_dev_debug(zrc->dev,
			      "Metadata flush for zone %u failed, err %d",
			      dmz_id(zmd, rzone), ret);
		return ret;
	}

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));

	return 0;
}

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
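
/*
 * Note: the target is idle when dmz_reclaim_bio_acc() has not refreshed
 * zrc->atime for at least DMZ_IDLE_PERIOD, i.e. no BIO has been received
 * for 10 seconds.
 */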

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of free random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}
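
/*
 * Example (illustrative numbers): with nr_rnd = 64 and nr_unmap_rnd = 16,
 * p_unmap_rnd = 25 and a busy target reclaims (25 <= 30); with
 * nr_unmap_rnd = 32, p_unmap_rnd = 50 and a busy target backs off
 * (50 >= 50).
 */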

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_bdev_is_dying(zrc->dev))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on random zones, and
	 * slower if some free random zones remain, to limit the impact
	 * of reclaim on the user workload as much as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
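
	/*
	 * Example (illustrative numbers): when busy with p_unmap_rnd = 20,
	 * the throttle becomes min(75, 100 - 20 / 2) = 75, capping the
	 * copy rate at 75%.
	 */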

	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		dmz_dev_debug(zrc->dev, "Reclaim error %d", ret);
		if (!dmz_check_bdev(zrc->dev))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}
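
	/*
	 * Note: the reclaim work below uses an ordered workqueue, so at
	 * most one reclaim work item runs at a time, and WQ_MEM_RECLAIM
	 * guarantees that reclaim can make forward progress even when the
	 * system is under memory pressure.
	 */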

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}