/* md-faulty.c */
  1. /*
  2. * faulty.c : Multiple Devices driver for Linux
  3. *
  4. * Copyright (C) 2004 Neil Brown
  5. *
 * faulty-device-simulator personality for md
  7. *
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2, or (at your option)
  12. * any later version.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * (for example /usr/src/linux/COPYING); if not, write to the Free
  16. * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  17. */
  18. /*
  19. * The "faulty" personality causes some requests to fail.
  20. *
  21. * Possible failure modes are:
  22. * reads fail "randomly" but succeed on retry
  23. * writes fail "randomly" but succeed on retry
  24. * reads for some address fail and then persist until a write
  25. * reads for some address fail and then persist irrespective of write
  26. * writes for some address fail and persist
  27. * all writes fail
  28. *
  29. * Different modes can be active at a time, but only
  30. * one can be set at array creation. Others can be added later.
  31. * A mode can be one-shot or recurrent with the recurrence being
  32. * once in every N requests.
  33. * The bottom 5 bits of the "layout" indicate the mode. The
  34. * remainder indicate a period, or 0 for one-shot.
  35. *
  36. * There is an implementation limit on the number of concurrently
  37. * persisting-faulty blocks. When a new fault is requested that would
  38. * exceed the limit, it is ignored.
 * All current faults can be cleared using a layout of "0".
  40. *
  41. * Requests are always sent to the device. If they are to fail,
  42. * we clone the bio and insert a new b_end_io into the chain.
  43. */
  44. #define WriteTransient 0
  45. #define ReadTransient 1
  46. #define WritePersistent 2
  47. #define ReadPersistent 3
  48. #define WriteAll 4 /* doesn't go to device */
  49. #define ReadFixable 5
  50. #define Modes 6
  51. #define ClearErrors 31
  52. #define ClearFaults 30
  53. #define AllPersist 100 /* internal use only */
  54. #define NoPersist 101
  55. #define ModeMask 0x1f
  56. #define ModeShift 5
  57. #define MaxFault 50
  58. #include <linux/blkdev.h>
  59. #include <linux/module.h>
  60. #include <linux/raid/md_u.h>
  61. #include <linux/slab.h>
  62. #include "md.h"
  63. #include <linux/seq_file.h>
  64. static void faulty_fail(struct bio *bio)
  65. {
  66. struct bio *b = bio->bi_private;
  67. b->bi_iter.bi_size = bio->bi_iter.bi_size;
  68. b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
  69. bio_put(bio);
  70. bio_io_error(b);
  71. }
/*
 * Per-array state for the "faulty" personality.
 */
struct faulty_conf {
	int period[Modes];		/* recurrence period per mode; 0 = one-shot */
	atomic_t counters[Modes];	/* requests remaining before the mode fires again */
	sector_t faults[MaxFault];	/* start sectors of recorded persistent faults */
	int modes[MaxFault];		/* fault mode for each faults[] entry */
	int nfaults;			/* number of faults[]/modes[] slots in use */
	struct md_rdev *rdev;		/* backing device (last rdev seen in faulty_run) */
};
  80. static int check_mode(struct faulty_conf *conf, int mode)
  81. {
  82. if (conf->period[mode] == 0 &&
  83. atomic_read(&conf->counters[mode]) <= 0)
  84. return 0; /* no failure, no decrement */
  85. if (atomic_dec_and_test(&conf->counters[mode])) {
  86. if (conf->period[mode])
  87. atomic_set(&conf->counters[mode], conf->period[mode]);
  88. return 1;
  89. }
  90. return 0;
  91. }
  92. static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
  93. {
  94. /* If we find a ReadFixable sector, we fix it ... */
  95. int i;
  96. for (i=0; i<conf->nfaults; i++)
  97. if (conf->faults[i] >= start &&
  98. conf->faults[i] < end) {
  99. /* found it ... */
  100. switch (conf->modes[i] * 2 + dir) {
  101. case WritePersistent*2+WRITE: return 1;
  102. case ReadPersistent*2+READ: return 1;
  103. case ReadFixable*2+READ: return 1;
  104. case ReadFixable*2+WRITE:
  105. conf->modes[i] = NoPersist;
  106. return 0;
  107. case AllPersist*2+READ:
  108. case AllPersist*2+WRITE: return 1;
  109. default:
  110. return 0;
  111. }
  112. }
  113. return 0;
  114. }
/*
 * Record a persistent fault at sector @start with the given @mode,
 * merging with any existing record for the same sector: a read-type
 * fault plus a write-type fault on one sector combine into AllPersist.
 * NoPersist overwrites an entry, effectively freeing its slot.
 * If the table is full and no freed slot can be recycled, the new
 * fault is silently dropped (documented implementation limit).
 */
static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
	int i;
	int n = conf->nfaults;	/* slot for a new entry; may be a recycled one */

	for (i = 0; i < conf->nfaults; i++)
		if (conf->faults[i] == start) {
			switch (mode) {
			case NoPersist:
				/* clear: mark the slot reusable */
				conf->modes[i] = mode;
				return;
			case WritePersistent:
				if (conf->modes[i] == ReadPersistent ||
				    conf->modes[i] == ReadFixable)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = WritePersistent;
				return;
			case ReadPersistent:
				if (conf->modes[i] == WritePersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadPersistent;
				return;
			case ReadFixable:
				if (conf->modes[i] == WritePersistent ||
				    conf->modes[i] == ReadPersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadFixable;
				return;
			}
		} else if (conf->modes[i] == NoPersist)
			n = i;	/* remember a cleared slot to recycle */

	/* table full and nothing to recycle: drop the fault */
	if (n >= MaxFault)
		return;
	conf->faults[n] = start;
	conf->modes[n] = mode;
	if (conf->nfaults == n)
		conf->nfaults = n + 1;
}
  153. static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
  154. {
  155. struct faulty_conf *conf = mddev->private;
  156. int failit = 0;
  157. if (bio_data_dir(bio) == WRITE) {
  158. /* write request */
  159. if (atomic_read(&conf->counters[WriteAll])) {
  160. /* special case - don't decrement, don't generic_make_request,
  161. * just fail immediately
  162. */
  163. bio_io_error(bio);
  164. return true;
  165. }
  166. if (check_sector(conf, bio->bi_iter.bi_sector,
  167. bio_end_sector(bio), WRITE))
  168. failit = 1;
  169. if (check_mode(conf, WritePersistent)) {
  170. add_sector(conf, bio->bi_iter.bi_sector,
  171. WritePersistent);
  172. failit = 1;
  173. }
  174. if (check_mode(conf, WriteTransient))
  175. failit = 1;
  176. } else {
  177. /* read request */
  178. if (check_sector(conf, bio->bi_iter.bi_sector,
  179. bio_end_sector(bio), READ))
  180. failit = 1;
  181. if (check_mode(conf, ReadTransient))
  182. failit = 1;
  183. if (check_mode(conf, ReadPersistent)) {
  184. add_sector(conf, bio->bi_iter.bi_sector,
  185. ReadPersistent);
  186. failit = 1;
  187. }
  188. if (check_mode(conf, ReadFixable)) {
  189. add_sector(conf, bio->bi_iter.bi_sector,
  190. ReadFixable);
  191. failit = 1;
  192. }
  193. }
  194. if (failit) {
  195. struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
  196. bio_set_dev(b, conf->rdev->bdev);
  197. b->bi_private = bio;
  198. b->bi_end_io = faulty_fail;
  199. bio = b;
  200. } else
  201. bio_set_dev(bio, conf->rdev->bdev);
  202. generic_make_request(bio);
  203. return true;
  204. }
  205. static void faulty_status(struct seq_file *seq, struct mddev *mddev)
  206. {
  207. struct faulty_conf *conf = mddev->private;
  208. int n;
  209. if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
  210. seq_printf(seq, " WriteTransient=%d(%d)",
  211. n, conf->period[WriteTransient]);
  212. if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
  213. seq_printf(seq, " ReadTransient=%d(%d)",
  214. n, conf->period[ReadTransient]);
  215. if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
  216. seq_printf(seq, " WritePersistent=%d(%d)",
  217. n, conf->period[WritePersistent]);
  218. if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
  219. seq_printf(seq, " ReadPersistent=%d(%d)",
  220. n, conf->period[ReadPersistent]);
  221. if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
  222. seq_printf(seq, " ReadFixable=%d(%d)",
  223. n, conf->period[ReadFixable]);
  224. if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
  225. seq_printf(seq, " WriteAll");
  226. seq_printf(seq, " nfaults=%d", conf->nfaults);
  227. }
  228. static int faulty_reshape(struct mddev *mddev)
  229. {
  230. int mode = mddev->new_layout & ModeMask;
  231. int count = mddev->new_layout >> ModeShift;
  232. struct faulty_conf *conf = mddev->private;
  233. if (mddev->new_layout < 0)
  234. return 0;
  235. /* new layout */
  236. if (mode == ClearFaults)
  237. conf->nfaults = 0;
  238. else if (mode == ClearErrors) {
  239. int i;
  240. for (i=0 ; i < Modes ; i++) {
  241. conf->period[i] = 0;
  242. atomic_set(&conf->counters[i], 0);
  243. }
  244. } else if (mode < Modes) {
  245. conf->period[mode] = count;
  246. if (!count) count++;
  247. atomic_set(&conf->counters[mode], count);
  248. } else
  249. return -EINVAL;
  250. mddev->new_layout = -1;
  251. mddev->layout = -1; /* makes sure further changes come through */
  252. return 0;
  253. }
  254. static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  255. {
  256. WARN_ONCE(raid_disks,
  257. "%s does not support generic reshape\n", __func__);
  258. if (sectors == 0)
  259. return mddev->dev_sectors;
  260. return sectors;
  261. }
  262. static int faulty_run(struct mddev *mddev)
  263. {
  264. struct md_rdev *rdev;
  265. int i;
  266. struct faulty_conf *conf;
  267. if (md_check_no_bitmap(mddev))
  268. return -EINVAL;
  269. conf = kmalloc(sizeof(*conf), GFP_KERNEL);
  270. if (!conf)
  271. return -ENOMEM;
  272. for (i=0; i<Modes; i++) {
  273. atomic_set(&conf->counters[i], 0);
  274. conf->period[i] = 0;
  275. }
  276. conf->nfaults = 0;
  277. rdev_for_each(rdev, mddev) {
  278. conf->rdev = rdev;
  279. disk_stack_limits(mddev->gendisk, rdev->bdev,
  280. rdev->data_offset << 9);
  281. }
  282. md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
  283. mddev->private = conf;
  284. faulty_reshape(mddev);
  285. return 0;
  286. }
  287. static void faulty_free(struct mddev *mddev, void *priv)
  288. {
  289. struct faulty_conf *conf = priv;
  290. kfree(conf);
  291. }
/*
 * md personality hooks for the "faulty" level (LEVEL_FAULTY).
 */
static struct md_personality faulty_personality =
{
	.name		= "faulty",
	.level		= LEVEL_FAULTY,
	.owner		= THIS_MODULE,
	.make_request	= faulty_make_request,
	.run		= faulty_run,
	.free		= faulty_free,
	.status		= faulty_status,
	.check_reshape	= faulty_reshape,
	.size		= faulty_size,
};
/* Module init: register the faulty personality with the md core. */
static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}
/* Module exit: unregister the faulty personality. */
static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}
/* Module entry points and metadata. */
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fault injection personality for MD");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");