dm-bio-prison-v1.c

/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024
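
/*
 * A prison keeps the currently detained cells in an rb-tree indexed by
 * dm_cell_key.  Cells come from a mempool with at least MIN_CELLS entries
 * reserved, so detaining bios does not depend on the slab allocator making
 * progress under memory pressure.
 */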
struct dm_bio_prison {
	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * The cell mempool reserves MIN_CELLS entries up front; that figure is the
 * number of cells expected to be in use _concurrently_.  Don't confuse it
 * with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
	struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

static void __setup_new_cell(struct dm_cell_key *key,
			     struct bio *holder,
			     struct dm_bio_prison_cell *cell)
{
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = holder;
	bio_list_init(&cell->bios);
}
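
/*
 * Keys compare by virtual flag, then device, then block range.  Two keys
 * whose [block_begin, block_end) ranges overlap on the same device compare
 * as equal, so a detain against any overlapping range lands in the same cell.
 */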
static int cmp_keys(struct dm_cell_key *lhs,
		    struct dm_cell_key *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}

static int __bio_detain(struct dm_bio_prison *prison,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell *cell =
			rb_entry(*new, struct dm_bio_prison_cell, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			if (inmate)
				bio_list_add(&cell->bios, inmate);
			*cell_result = cell;
			return 1;
		}
	}

	__setup_new_cell(key, inmate, cell_prealloc);
	*cell_result = cell_prealloc;

	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}

int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);
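
/*
 * Example (a sketch of typical caller usage; not part of this file).  The
 * detain functions take a preallocated cell because the rb-tree is updated
 * under a spinlock, where allocation is not allowed to sleep:
 *
 *	struct dm_cell_key key;		(filled in by the caller)
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell))
 *		dm_bio_prison_free_cell(prison, prealloc);
 *
 * A non-zero return means the key was already detained: the bio has been
 * queued on the existing cell and the preallocated cell was not consumed.
 * A zero return means the preallocated cell became the new cell with the
 * bio as holder; it must later be handed back via dm_cell_release() or
 * dm_cell_error().
 */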

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison *prison,
			   struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);

	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}
}

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison *prison,
				     struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);
	bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(prison, cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error)
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);
	dm_cell_release(prison, cell, &bios);

	while ((bio = bio_list_pop(&bios))) {
		bio->bi_status = error;
		bio_endio(bio);
	}
}
EXPORT_SYMBOL_GPL(dm_cell_error);
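
/*
 * Note that visit_fn is called with the prison lock held and interrupts
 * disabled, so it must not sleep and should do as little work as possible.
 */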
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context,
			   struct dm_bio_prison_cell *cell)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	visit_fn(context, cell);
	rb_erase(&cell->node, &prison->cells);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
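
/*
 * Returns 1 if the cell had no waiting bios and has been removed from the
 * prison, or 0 if a waiting bio was promoted to be the new holder and the
 * cell remains detained.
 */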
static int __promote_or_release(struct dm_bio_prison *prison,
				struct dm_bio_prison_cell *cell)
{
	if (bio_list_empty(&cell->bios)) {
		rb_erase(&cell->node, &prison->cells);
		return 1;
	}

	cell->holder = bio_list_pop(&cell->bios);
	return 0;
}

int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote_or_release(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);

/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64
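
/*
 * A deferred set is a fixed ring of DEFERRED_SET_SIZE entries.  Callers take
 * a reference on the current entry for each in-flight operation; work items
 * queued against an entry are only handed back once every reference on that
 * entry, and on all earlier entries, has been dropped.
 */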
struct dm_deferred_entry {
	struct dm_deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred, or 0 if there are no pending items
 * and the job can be carried out immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
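
/*
 * Example (a sketch of the intended usage pattern; not part of this file):
 *
 *	entry = dm_deferred_entry_inc(ds);	(before issuing the I/O)
 *	...
 *	INIT_LIST_HEAD(&work);
 *	dm_deferred_entry_dec(entry, &work);	(on I/O completion)
 *	(process any work items spliced onto "work", outside the lock)
 *
 * and for a job that must wait for all currently in-flight entries,
 * where "list" is the job's list_head:
 *
 *	if (!dm_deferred_set_add_work(ds, &list))
 *		(nothing is pending, do the job immediately)
 */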

/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init_v1(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

static void dm_bio_prison_exit_v1(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

static int (*_inits[])(void) __initdata = {
	dm_bio_prison_init_v1,
	dm_bio_prison_init_v2,
};

static void (*_exits[])(void) = {
	dm_bio_prison_exit_v1,
	dm_bio_prison_exit_v2,
};

static int __init dm_bio_prison_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_bio_prison_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");