dm-cache-background-tracker.c
  1. /*
  2. * Copyright (C) 2017 Red Hat. All rights reserved.
  3. *
  4. * This file is released under the GPL.
  5. */
  6. #include "dm-cache-background-tracker.h"
  7. /*----------------------------------------------------------------*/
  8. #define DM_MSG_PREFIX "dm-background-tracker"
/*
 * One piece of outstanding background work.  It lives on exactly one of
 * the tracker's lists (queued or issued) via @list, and in the tracker's
 * pending rbtree via @node, keyed by work.oblock.
 */
struct bt_work {
	struct list_head list;		/* entry on b->queued or b->issued */
	struct rb_node node;		/* entry in b->pending, keyed by work.oblock */
	struct policy_work work;	/* copy of the policy-supplied work */
};
/*
 * Tracks background cache work (promotes, demotes, writebacks),
 * capping the total number of outstanding items at @max_work.
 */
struct background_tracker {
	unsigned max_work;		/* cap on total outstanding work items */

	/* per-op counters: incremented on queue, decremented on completion */
	atomic_t pending_promotes;
	atomic_t pending_writebacks;
	atomic_t pending_demotes;

	struct list_head issued;	/* work handed out to the caller */
	struct list_head queued;	/* work accepted, awaiting btracker_issue() */
	struct rb_root pending;		/* all outstanding work, keyed by oblock */

	struct kmem_cache *work_cache;	/* slab cache for struct bt_work */
};
  24. struct background_tracker *btracker_create(unsigned max_work)
  25. {
  26. struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
  27. if (!b) {
  28. DMERR("couldn't create background_tracker");
  29. return NULL;
  30. }
  31. b->max_work = max_work;
  32. atomic_set(&b->pending_promotes, 0);
  33. atomic_set(&b->pending_writebacks, 0);
  34. atomic_set(&b->pending_demotes, 0);
  35. INIT_LIST_HEAD(&b->issued);
  36. INIT_LIST_HEAD(&b->queued);
  37. b->pending = RB_ROOT;
  38. b->work_cache = KMEM_CACHE(bt_work, 0);
  39. if (!b->work_cache) {
  40. DMERR("couldn't create mempool for background work items");
  41. kfree(b);
  42. b = NULL;
  43. }
  44. return b;
  45. }
  46. EXPORT_SYMBOL_GPL(btracker_create);
  47. void btracker_destroy(struct background_tracker *b)
  48. {
  49. kmem_cache_destroy(b->work_cache);
  50. kfree(b);
  51. }
  52. EXPORT_SYMBOL_GPL(btracker_destroy);
  53. static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
  54. {
  55. if (from_oblock(lhs) < from_oblock(rhs))
  56. return -1;
  57. if (from_oblock(rhs) < from_oblock(lhs))
  58. return 1;
  59. return 0;
  60. }
  61. static bool __insert_pending(struct background_tracker *b,
  62. struct bt_work *nw)
  63. {
  64. int cmp;
  65. struct bt_work *w;
  66. struct rb_node **new = &b->pending.rb_node, *parent = NULL;
  67. while (*new) {
  68. w = container_of(*new, struct bt_work, node);
  69. parent = *new;
  70. cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
  71. if (cmp < 0)
  72. new = &((*new)->rb_left);
  73. else if (cmp > 0)
  74. new = &((*new)->rb_right);
  75. else
  76. /* already present */
  77. return false;
  78. }
  79. rb_link_node(&nw->node, parent, new);
  80. rb_insert_color(&nw->node, &b->pending);
  81. return true;
  82. }
  83. static struct bt_work *__find_pending(struct background_tracker *b,
  84. dm_oblock_t oblock)
  85. {
  86. int cmp;
  87. struct bt_work *w;
  88. struct rb_node **new = &b->pending.rb_node;
  89. while (*new) {
  90. w = container_of(*new, struct bt_work, node);
  91. cmp = cmp_oblock(w->work.oblock, oblock);
  92. if (cmp < 0)
  93. new = &((*new)->rb_left);
  94. else if (cmp > 0)
  95. new = &((*new)->rb_right);
  96. else
  97. break;
  98. }
  99. return *new ? w : NULL;
  100. }
  101. static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
  102. {
  103. switch (w->op) {
  104. case POLICY_PROMOTE:
  105. atomic_add(delta, &b->pending_promotes);
  106. break;
  107. case POLICY_DEMOTE:
  108. atomic_add(delta, &b->pending_demotes);
  109. break;
  110. case POLICY_WRITEBACK:
  111. atomic_add(delta, &b->pending_writebacks);
  112. break;
  113. }
  114. }
  115. unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
  116. {
  117. return atomic_read(&b->pending_writebacks);
  118. }
  119. EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
  120. unsigned btracker_nr_demotions_queued(struct background_tracker *b)
  121. {
  122. return atomic_read(&b->pending_demotes);
  123. }
  124. EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
  125. static bool max_work_reached(struct background_tracker *b)
  126. {
  127. return atomic_read(&b->pending_promotes) +
  128. atomic_read(&b->pending_writebacks) +
  129. atomic_read(&b->pending_demotes) >= b->max_work;
  130. }
  131. static struct bt_work *alloc_work(struct background_tracker *b)
  132. {
  133. if (max_work_reached(b))
  134. return NULL;
  135. return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
  136. }
  137. int btracker_queue(struct background_tracker *b,
  138. struct policy_work *work,
  139. struct policy_work **pwork)
  140. {
  141. struct bt_work *w;
  142. if (pwork)
  143. *pwork = NULL;
  144. w = alloc_work(b);
  145. if (!w)
  146. return -ENOMEM;
  147. memcpy(&w->work, work, sizeof(*work));
  148. if (!__insert_pending(b, w)) {
  149. /*
  150. * There was a race, we'll just ignore this second
  151. * bit of work for the same oblock.
  152. */
  153. kmem_cache_free(b->work_cache, w);
  154. return -EINVAL;
  155. }
  156. if (pwork) {
  157. *pwork = &w->work;
  158. list_add(&w->list, &b->issued);
  159. } else
  160. list_add(&w->list, &b->queued);
  161. update_stats(b, &w->work, 1);
  162. return 0;
  163. }
  164. EXPORT_SYMBOL_GPL(btracker_queue);
  165. /*
  166. * Returns -ENODATA if there's no work.
  167. */
  168. int btracker_issue(struct background_tracker *b, struct policy_work **work)
  169. {
  170. struct bt_work *w;
  171. if (list_empty(&b->queued))
  172. return -ENODATA;
  173. w = list_first_entry(&b->queued, struct bt_work, list);
  174. list_move(&w->list, &b->issued);
  175. *work = &w->work;
  176. return 0;
  177. }
  178. EXPORT_SYMBOL_GPL(btracker_issue);
  179. void btracker_complete(struct background_tracker *b,
  180. struct policy_work *op)
  181. {
  182. struct bt_work *w = container_of(op, struct bt_work, work);
  183. update_stats(b, &w->work, -1);
  184. rb_erase(&w->node, &b->pending);
  185. list_del(&w->list);
  186. kmem_cache_free(b->work_cache, w);
  187. }
  188. EXPORT_SYMBOL_GPL(btracker_complete);
  189. bool btracker_promotion_already_present(struct background_tracker *b,
  190. dm_oblock_t oblock)
  191. {
  192. return __find_pending(b, oblock) != NULL;
  193. }
  194. EXPORT_SYMBOL_GPL(btracker_promotion_already_present);
  195. /*----------------------------------------------------------------*/