md-linear.c 8.7 KB

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. linear.c : Multiple Devices driver for Linux
  4. Copyright (C) 1994-96 Marc ZYNGIER
  5. <zyngier@ufr-info-p7.ibp.fr> or
  6. <maz@gloups.fdn.fr>
  7. Linear mode management functions.
  8. */
  9. #include <linux/blkdev.h>
  10. #include <linux/raid/md_u.h>
  11. #include <linux/seq_file.h>
  12. #include <linux/module.h>
  13. #include <linux/slab.h>
  14. #include <trace/events/block.h>
  15. #include "md.h"
  16. #include "md-linear.h"
  17. /*
  18. * find which device holds a particular offset
  19. */
  20. static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
  21. {
  22. int lo, mid, hi;
  23. struct linear_conf *conf;
  24. lo = 0;
  25. hi = mddev->raid_disks - 1;
  26. conf = mddev->private;
  27. /*
  28. * Binary Search
  29. */
  30. while (hi > lo) {
  31. mid = (hi + lo) / 2;
  32. if (sector < conf->disks[mid].end_sector)
  33. hi = mid;
  34. else
  35. lo = mid + 1;
  36. }
  37. return conf->disks + lo;
  38. }
  39. /*
  40. * In linear_congested() conf->raid_disks is used as a copy of
  41. * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks
  42. * and conf->disks[] are created in linear_conf(), they are always
  43. * consitent with each other, but mddev->raid_disks does not.
  44. */
  45. static int linear_congested(struct mddev *mddev, int bits)
  46. {
  47. struct linear_conf *conf;
  48. int i, ret = 0;
  49. rcu_read_lock();
  50. conf = rcu_dereference(mddev->private);
  51. for (i = 0; i < conf->raid_disks && !ret ; i++) {
  52. struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
  53. ret |= bdi_congested(q->backing_dev_info, bits);
  54. }
  55. rcu_read_unlock();
  56. return ret;
  57. }
  58. static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  59. {
  60. struct linear_conf *conf;
  61. sector_t array_sectors;
  62. conf = mddev->private;
  63. WARN_ONCE(sectors || raid_disks,
  64. "%s does not support generic reshape\n", __func__);
  65. array_sectors = conf->array_sectors;
  66. return array_sectors;
  67. }
/*
 * Build a new linear_conf describing @raid_disks member devices.
 *
 * Walks every rdev attached to @mddev, places each at its raid_disk slot,
 * optionally rounds its size down to a whole number of chunks, stacks its
 * queue limits, and computes the cumulative end_sector offsets consumed by
 * which_dev().
 *
 * Returns the new configuration, or NULL on allocation failure or when the
 * member set is inconsistent (bad/duplicate slot numbers, or fewer devices
 * present than @raid_disks).  The caller owns the returned allocation.
 */
static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
{
	struct linear_conf *conf;
	struct md_rdev *rdev;
	int i, cnt;
	bool discard_supported = false;

	/* Header plus a flexible array of @raid_disks dev_info entries. */
	conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
	if (!conf)
		return NULL;

	cnt = 0;
	conf->array_sectors = 0;

	rdev_for_each(rdev, mddev) {
		int j = rdev->raid_disk;
		struct dev_info *disk = conf->disks + j;
		sector_t sectors;

		/* Reject out-of-range slots and duplicate slot assignments. */
		if (j < 0 || j >= raid_disks || disk->rdev) {
			pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
				mdname(mddev));
			goto out;
		}

		disk->rdev = rdev;
		if (mddev->chunk_sectors) {
			/* Round the usable size down to a multiple of the chunk size. */
			sectors = rdev->sectors;
			sector_div(sectors, mddev->chunk_sectors);
			rdev->sectors = sectors * mddev->chunk_sectors;
		}

		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		conf->array_sectors += rdev->sectors;
		cnt++;

		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}
	if (cnt != raid_disks) {
		pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
			mdname(mddev));
		goto out;
	}

	/* Advertise discard only if every member supports it. */
	if (!discard_supported)
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
	else
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);

	/*
	 * Here we calculate the device offsets: disks[i].end_sector is the
	 * cumulative size of members 0..i, which which_dev() binary-searches.
	 */
	conf->disks[0].end_sector = conf->disks[0].rdev->sectors;

	for (i = 1; i < raid_disks; i++)
		conf->disks[i].end_sector =
			conf->disks[i-1].end_sector +
			conf->disks[i].rdev->sectors;

	/*
	 * conf->raid_disks is a copy of mddev->raid_disks.  The reason to
	 * keep a copy of mddev->raid_disks in struct linear_conf is that
	 * mddev->raid_disks may not be consistent with the number of
	 * pointers in conf->disks[] when it is updated in linear_add() and
	 * used to iterate the old conf->disks[] array in linear_congested().
	 * Here conf->raid_disks is always consistent with the number of
	 * pointers in conf->disks[], and mddev->private is updated with
	 * rcu_assign_pointer() in linear_add(), so that race is avoided.
	 */
	conf->raid_disks = raid_disks;

	return conf;

out:
	kfree(conf);
	return NULL;
}
  135. static int linear_run (struct mddev *mddev)
  136. {
  137. struct linear_conf *conf;
  138. int ret;
  139. if (md_check_no_bitmap(mddev))
  140. return -EINVAL;
  141. conf = linear_conf(mddev, mddev->raid_disks);
  142. if (!conf)
  143. return 1;
  144. mddev->private = conf;
  145. md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
  146. ret = md_integrity_register(mddev);
  147. if (ret) {
  148. kfree(conf);
  149. mddev->private = NULL;
  150. }
  151. return ret;
  152. }
/*
 * Personality ->hot_add_disk() callback: grow the array by one member.
 *
 * Returns 0 on success, -EINVAL if @rdev does not claim the next slot,
 * or -ENOMEM if the replacement configuration cannot be built.
 */
static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
{
	/* Adding a drive to a linear array allows the array to grow.
	 * It is permitted if the new drive has a matching superblock
	 * already on it, with raid_disk equal to raid_disks.
	 * It is achieved by creating a new linear_private_data structure
	 * and swapping it in in-place of the current one.
	 * The current one is never freed until the array is stopped.
	 * This avoids races.
	 */
	struct linear_conf *newconf, *oldconf;

	/* The incoming device must claim exactly the next free slot. */
	if (rdev->saved_raid_disk != mddev->raid_disks)
		return -EINVAL;

	rdev->raid_disk = rdev->saved_raid_disk;
	rdev->saved_raid_disk = -1;

	/* NOTE(review): if linear_conf() fails, rdev->raid_disk and
	 * saved_raid_disk are left modified above — confirm callers
	 * tolerate that on the -ENOMEM path.
	 */
	newconf = linear_conf(mddev,mddev->raid_disks+1);

	if (!newconf)
		return -ENOMEM;

	/* newconf->raid_disks already keeps a copy of the increased
	 * value of mddev->raid_disks, WARN_ONCE() is just used to make
	 * sure of this. It is possible that oldconf is still referenced
	 * in linear_congested(), therefore kfree_rcu() is used to free
	 * oldconf until no one uses it anymore.
	 */
	mddev_suspend(mddev);
	oldconf = rcu_dereference_protected(mddev->private,
			lockdep_is_held(&mddev->reconfig_mutex));
	mddev->raid_disks++;
	WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
		"copied raid_disks doesn't match mddev->raid_disks");
	/* Publish the new conf before resuming I/O; readers use RCU. */
	rcu_assign_pointer(mddev->private, newconf);
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
	set_capacity(mddev->gendisk, mddev->array_sectors);
	mddev_resume(mddev);
	revalidate_disk(mddev->gendisk);
	/* Deferred free: RCU readers may still hold the old conf. */
	kfree_rcu(oldconf, rcu);
	return 0;
}
  191. static void linear_free(struct mddev *mddev, void *priv)
  192. {
  193. struct linear_conf *conf = priv;
  194. kfree(conf);
  195. }
/*
 * Personality ->make_request() callback: map @bio onto the member device
 * holding its start sector and submit it.  A bio that straddles a member
 * boundary is split; the remainder is resubmitted and mapped on a later
 * pass.  Always returns true (the bio has been consumed).
 */
static bool linear_make_request(struct mddev *mddev, struct bio *bio)
{
	char b[BDEVNAME_SIZE];
	struct dev_info *tmp_dev;
	sector_t start_sector, end_sector, data_offset;
	sector_t bio_sector = bio->bi_iter.bi_sector;

	/* Flush requests may be fully handled by md_flush_request(). */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	tmp_dev = which_dev(mddev, bio_sector);
	/* Member occupies [start_sector, end_sector) in the array's space. */
	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
	end_sector = tmp_dev->end_sector;
	data_offset = tmp_dev->rdev->data_offset;

	if (unlikely(bio_sector >= end_sector ||
		     bio_sector < start_sector))
		goto out_of_bounds;

	if (unlikely(is_mddev_broken(tmp_dev->rdev, "linear"))) {
		bio_io_error(bio);
		return true;
	}

	if (unlikely(bio_end_sector(bio) > end_sector)) {
		/* This bio crosses a device boundary, so we have to split it */
		struct bio *split = bio_split(bio, end_sector - bio_sector,
					      GFP_NOIO, &mddev->bio_set);
		bio_chain(split, bio);
		/* Resubmit the tail to the MD device itself. */
		generic_make_request(bio);
		bio = split;
	}

	/* Redirect to the member, translating to its on-disk offset. */
	bio_set_dev(bio, tmp_dev->rdev->bdev);
	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
		start_sector + data_offset;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
		     !blk_queue_discard(bio->bi_disk->queue))) {
		/* Just ignore it */
		bio_endio(bio);
	} else {
		if (mddev->gendisk)
			trace_block_bio_remap(bio->bi_disk->queue,
					      bio, disk_devt(mddev->gendisk),
					      bio_sector);
		mddev_check_writesame(mddev, bio);
		mddev_check_write_zeroes(mddev, bio);
		generic_make_request(bio);
	}
	return true;

out_of_bounds:
	pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
	       mdname(mddev),
	       (unsigned long long)bio->bi_iter.bi_sector,
	       bdevname(tmp_dev->rdev->bdev, b),
	       (unsigned long long)tmp_dev->rdev->sectors,
	       (unsigned long long)start_sector);
	bio_io_error(bio);
	return true;
}
  251. static void linear_status (struct seq_file *seq, struct mddev *mddev)
  252. {
  253. seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
  254. }
/*
 * Personality ->quiesce() callback: deliberately a no-op — this
 * personality keeps no internal request state of its own to drain
 * (the body has always been empty).
 */
static void linear_quiesce(struct mddev *mddev, int state)
{
}
/*
 * Personality descriptor registered with the MD core; array operations
 * are dispatched through these callbacks.
 */
static struct md_personality linear_personality =
{
	.name = "linear",
	.level = LEVEL_LINEAR,
	.owner = THIS_MODULE,
	.make_request = linear_make_request,
	.run = linear_run,
	.free = linear_free,
	.status = linear_status,
	.hot_add_disk = linear_add,
	.size = linear_size,
	.quiesce = linear_quiesce,
	.congested = linear_congested,
};
  272. static int __init linear_init (void)
  273. {
  274. return register_md_personality (&linear_personality);
  275. }
  276. static void linear_exit (void)
  277. {
  278. unregister_md_personality (&linear_personality);
  279. }
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Linear device concatenation personality for MD");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated */
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");