
  1. /*
  2. drbd_req.c
  3. This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
  4. Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  5. Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  6. Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
  7. drbd is free software; you can redistribute it and/or modify
  8. it under the terms of the GNU General Public License as published by
  9. the Free Software Foundation; either version 2, or (at your option)
  10. any later version.
  11. drbd is distributed in the hope that it will be useful,
  12. but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. GNU General Public License for more details.
  15. You should have received a copy of the GNU General Public License
  16. along with drbd; see the file COPYING. If not, write to
  17. the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  18. */
  19. #include <linux/module.h>
  20. #include <linux/slab.h>
  21. #include <linux/drbd.h>
  22. #include "drbd_int.h"
  23. #include "drbd_req.h"
  24. static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
  25. /* Update disk stats at start of I/O request */
  26. static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
  27. {
  28. generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
  29. &device->vdisk->part0);
  30. }
  31. /* Update disk stats when completing request upwards */
  32. static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
  33. {
  34. generic_end_io_acct(bio_data_dir(req->master_bio),
  35. &device->vdisk->part0, req->start_jif);
  36. }
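/* Allocate a drbd_request from the mempool and initialize it for the given
 * master bio: clone the bio as our private_bio, record sector and size in
 * the interval, and take one completion reference plus one kref on behalf
 * of __drbd_make_request. */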
  37. static struct drbd_request *drbd_req_new(struct drbd_device *device,
  38. struct bio *bio_src)
  39. {
  40. struct drbd_request *req;
  41. req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
  42. if (!req)
  43. return NULL;
  44. memset(req, 0, sizeof(*req));
  45. drbd_req_make_private_bio(req, bio_src);
  46. req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
  47. req->device = device;
  48. req->master_bio = bio_src;
  49. req->epoch = 0;
  50. drbd_clear_interval(&req->i);
  51. req->i.sector = bio_src->bi_iter.bi_sector;
  52. req->i.size = bio_src->bi_iter.bi_size;
  53. req->i.local = true;
  54. req->i.waiting = false;
  55. INIT_LIST_HEAD(&req->tl_requests);
  56. INIT_LIST_HEAD(&req->w.list);
  57. INIT_LIST_HEAD(&req->req_pending_master_completion);
  58. INIT_LIST_HEAD(&req->req_pending_local);
  59. /* one reference to be put by __drbd_make_request */
  60. atomic_set(&req->completion_ref, 1);
  61. /* one kref as long as completion_ref > 0 */
  62. kref_init(&req->kref);
  63. return req;
  64. }
  65. static void drbd_remove_request_interval(struct rb_root *root,
  66. struct drbd_request *req)
  67. {
  68. struct drbd_device *device = req->device;
  69. struct drbd_interval *i = &req->i;
  70. drbd_remove_interval(root, i);
  71. /* Wake up any processes waiting for this request to complete. */
  72. if (i->waiting)
  73. wake_up(&device->misc_wait);
  74. }
  75. void drbd_req_destroy(struct kref *kref)
  76. {
  77. struct drbd_request *req = container_of(kref, struct drbd_request, kref);
  78. struct drbd_device *device = req->device;
  79. const unsigned s = req->rq_state;
  80. if ((req->master_bio && !(s & RQ_POSTPONED)) ||
  81. atomic_read(&req->completion_ref) ||
  82. (s & RQ_LOCAL_PENDING) ||
  83. ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
  84. drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
  85. s, atomic_read(&req->completion_ref));
  86. return;
  87. }
  88. /* If called from mod_rq_state (expected normal case) or
  89. * drbd_send_and_submit (the less likely normal path), this holds the
90. * req_lock, and req->tl_requests will typically be on ->transfer_log,
  91. * though it may be still empty (never added to the transfer log).
  92. *
  93. * If called from do_retry(), we do NOT hold the req_lock, but we are
  94. * still allowed to unconditionally list_del(&req->tl_requests),
  95. * because it will be on a local on-stack list only. */
  96. list_del_init(&req->tl_requests);
  97. /* finally remove the request from the conflict detection
  98. * respective block_id verification interval tree. */
  99. if (!drbd_interval_empty(&req->i)) {
  100. struct rb_root *root;
  101. if (s & RQ_WRITE)
  102. root = &device->write_requests;
  103. else
  104. root = &device->read_requests;
  105. drbd_remove_request_interval(root, req);
  106. } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
  107. drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
  108. s, (unsigned long long)req->i.sector, req->i.size);
  109. /* if it was a write, we may have to set the corresponding
  110. * bit(s) out-of-sync first. If it had a local part, we need to
  111. * release the reference to the activity log. */
  112. if (s & RQ_WRITE) {
  113. /* Set out-of-sync unless both OK flags are set
  114. * (local only or remote failed).
  115. * Other places where we set out-of-sync:
  116. * READ with local io-error */
  117. /* There is a special case:
  118. * we may notice late that IO was suspended,
  119. * and postpone, or schedule for retry, a write,
  120. * before it even was submitted or sent.
  121. * In that case we do not want to touch the bitmap at all.
  122. */
  123. if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
  124. if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
  125. drbd_set_out_of_sync(device, req->i.sector, req->i.size);
  126. if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
  127. drbd_set_in_sync(device, req->i.sector, req->i.size);
  128. }
  129. /* one might be tempted to move the drbd_al_complete_io
  130. * to the local io completion callback drbd_request_endio.
  131. * but, if this was a mirror write, we may only
  132. * drbd_al_complete_io after this is RQ_NET_DONE,
  133. * otherwise the extent could be dropped from the al
  134. * before it has actually been written on the peer.
  135. * if we crash before our peer knows about the request,
  136. * but after the extent has been dropped from the al,
  137. * we would forget to resync the corresponding extent.
  138. */
  139. if (s & RQ_IN_ACT_LOG) {
  140. if (get_ldev_if_state(device, D_FAILED)) {
  141. drbd_al_complete_io(device, &req->i);
  142. put_ldev(device);
  143. } else if (__ratelimit(&drbd_ratelimit_state)) {
  144. drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
  145. "but my Disk seems to have failed :(\n",
  146. (unsigned long long) req->i.sector, req->i.size);
  147. }
  148. }
  149. }
  150. mempool_free(req, drbd_request_mempool);
  151. }
  152. static void wake_all_senders(struct drbd_connection *connection)
  153. {
  154. wake_up(&connection->sender_work.q_wait);
  155. }
  156. /* must hold resource->req_lock */
  157. void start_new_tl_epoch(struct drbd_connection *connection)
  158. {
  159. /* no point closing an epoch, if it is empty, anyways. */
  160. if (connection->current_tle_writes == 0)
  161. return;
  162. connection->current_tle_writes = 0;
  163. atomic_inc(&connection->current_tle_nr);
  164. wake_all_senders(connection);
  165. }
  166. void complete_master_bio(struct drbd_device *device,
  167. struct bio_and_error *m)
  168. {
  169. bio_endio(m->bio, m->error);
  170. dec_ap_bio(device);
  171. }
  172. /* Helper for __req_mod().
  173. * Set m->bio to the master bio, if it is fit to be completed,
  174. * or leave it alone (it is initialized to NULL in __req_mod),
  175. * if it has already been completed, or cannot be completed yet.
  176. * If m->bio is set, the error status to be returned is placed in m->error.
  177. */
  178. static
  179. void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
  180. {
  181. const unsigned s = req->rq_state;
  182. struct drbd_device *device = req->device;
  183. int rw;
  184. int error, ok;
  185. /* we must not complete the master bio, while it is
  186. * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
  187. * not yet acknowledged by the peer
  188. * not yet completed by the local io subsystem
  189. * these flags may get cleared in any order by
  190. * the worker,
  191. * the receiver,
  192. * the bio_endio completion callbacks.
  193. */
  194. if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
  195. (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
  196. (s & RQ_COMPLETION_SUSP)) {
  197. drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
  198. return;
  199. }
  200. if (!req->master_bio) {
  201. drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
  202. return;
  203. }
  204. rw = bio_rw(req->master_bio);
  205. /*
  206. * figure out whether to report success or failure.
  207. *
  208. * report success when at least one of the operations succeeded.
  209. * or, to put the other way,
  210. * only report failure, when both operations failed.
  211. *
  212. * what to do about the failures is handled elsewhere.
  213. * what we need to do here is just: complete the master_bio.
  214. *
  215. * local completion error, if any, has been stored as ERR_PTR
  216. * in private_bio within drbd_request_endio.
  217. */
  218. ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
  219. error = PTR_ERR(req->private_bio);
  220. /* Before we can signal completion to the upper layers,
  221. * we may need to close the current transfer log epoch.
  222. * We are within the request lock, so we can simply compare
  223. * the request epoch number with the current transfer log
  224. * epoch number. If they match, increase the current_tle_nr,
  225. * and reset the transfer log epoch write_cnt.
  226. */
  227. if (rw == WRITE &&
  228. req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
  229. start_new_tl_epoch(first_peer_device(device)->connection);
  230. /* Update disk stats */
  231. _drbd_end_io_acct(device, req);
  232. /* If READ failed,
  233. * have it be pushed back to the retry work queue,
  234. * so it will re-enter __drbd_make_request(),
  235. * and be re-assigned to a suitable local or remote path,
  236. * or failed if we do not have access to good data anymore.
  237. *
  238. * Unless it was failed early by __drbd_make_request(),
  239. * because no path was available, in which case
  240. * it was not even added to the transfer_log.
  241. *
  242. * READA may fail, and will not be retried.
  243. *
  244. * WRITE should have used all available paths already.
  245. */
  246. if (!ok && rw == READ && !list_empty(&req->tl_requests))
  247. req->rq_state |= RQ_POSTPONED;
  248. if (!(req->rq_state & RQ_POSTPONED)) {
  249. m->error = ok ? 0 : (error ?: -EIO);
  250. m->bio = req->master_bio;
  251. req->master_bio = NULL;
  252. /* We leave it in the tree, to be able to verify later
  253. * write-acks in protocol != C during resync.
  254. * But we mark it as "complete", so it won't be counted as
  255. * conflict in a multi-primary setup. */
  256. req->i.completed = true;
  257. }
  258. if (req->i.waiting)
  259. wake_up(&device->misc_wait);
  260. /* Either we are about to complete to upper layers,
  261. * or we will restart this request.
  262. * In either case, the request object will be destroyed soon,
  263. * so better remove it from all lists. */
  264. list_del_init(&req->req_pending_master_completion);
  265. }
  266. /* still holds resource->req_lock */
  267. static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
  268. {
  269. struct drbd_device *device = req->device;
  270. D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
  271. if (!atomic_sub_and_test(put, &req->completion_ref))
  272. return 0;
  273. drbd_req_complete(req, m);
  274. if (req->rq_state & RQ_POSTPONED) {
  275. /* don't destroy the req object just yet,
  276. * but queue it for retry */
  277. drbd_restart_request(req);
  278. return 0;
  279. }
  280. return 1;
  281. }
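/* The connection keeps a few caching pointers into the transfer log
 * (req_next, req_ack_pending, req_not_net_done), so the oldest request in
 * each of those states can be found without walking the whole log.
 * The set_if_null_* helpers initialize a pointer when it is still unset;
 * the advance_* helpers move it forward to the next request that still
 * qualifies (or NULL it) once the current one no longer does. */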
  282. static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
  283. {
  284. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  285. if (!connection)
  286. return;
  287. if (connection->req_next == NULL)
  288. connection->req_next = req;
  289. }
  290. static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
  291. {
  292. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  293. if (!connection)
  294. return;
  295. if (connection->req_next != req)
  296. return;
  297. list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
  298. const unsigned s = req->rq_state;
  299. if (s & RQ_NET_QUEUED)
  300. break;
  301. }
  302. if (&req->tl_requests == &connection->transfer_log)
  303. req = NULL;
  304. connection->req_next = req;
  305. }
  306. static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
  307. {
  308. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  309. if (!connection)
  310. return;
  311. if (connection->req_ack_pending == NULL)
  312. connection->req_ack_pending = req;
  313. }
  314. static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
  315. {
  316. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  317. if (!connection)
  318. return;
  319. if (connection->req_ack_pending != req)
  320. return;
  321. list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
  322. const unsigned s = req->rq_state;
  323. if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
  324. break;
  325. }
  326. if (&req->tl_requests == &connection->transfer_log)
  327. req = NULL;
  328. connection->req_ack_pending = req;
  329. }
  330. static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
  331. {
  332. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  333. if (!connection)
  334. return;
  335. if (connection->req_not_net_done == NULL)
  336. connection->req_not_net_done = req;
  337. }
  338. static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
  339. {
  340. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  341. if (!connection)
  342. return;
  343. if (connection->req_not_net_done != req)
  344. return;
  345. list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
  346. const unsigned s = req->rq_state;
  347. if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
  348. break;
  349. }
  350. if (&req->tl_requests == &connection->transfer_log)
  351. req = NULL;
  352. connection->req_not_net_done = req;
  353. }
  354. /* I'd like this to be the only place that manipulates
  355. * req->completion_ref and req->kref. */
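/* completion_ref counts the reasons why the master bio cannot be completed
 * yet (local I/O pending, network send pending or queued, completion
 * suspended); kref counts the reasons why the request object itself must
 * stay allocated.  mod_rq_state() first takes the references implied by
 * newly set state bits, then accumulates the puts implied by cleared bits
 * in c_put/k_put, and applies them only at the very end, so the request
 * cannot be destroyed while we are still looking at it. */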
  356. static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
  357. int clear, int set)
  358. {
  359. struct drbd_device *device = req->device;
  360. struct drbd_peer_device *peer_device = first_peer_device(device);
  361. unsigned s = req->rq_state;
  362. int c_put = 0;
  363. int k_put = 0;
  364. if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
  365. set |= RQ_COMPLETION_SUSP;
  366. /* apply */
  367. req->rq_state &= ~clear;
  368. req->rq_state |= set;
  369. /* no change? */
  370. if (req->rq_state == s)
  371. return;
  372. /* intent: get references */
  373. if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
  374. atomic_inc(&req->completion_ref);
  375. if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
  376. inc_ap_pending(device);
  377. atomic_inc(&req->completion_ref);
  378. }
  379. if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
  380. atomic_inc(&req->completion_ref);
  381. set_if_null_req_next(peer_device, req);
  382. }
  383. if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
  384. kref_get(&req->kref); /* wait for the DONE */
  385. if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
  386. /* potentially already completed in the asender thread */
  387. if (!(s & RQ_NET_DONE)) {
  388. atomic_add(req->i.size >> 9, &device->ap_in_flight);
  389. set_if_null_req_not_net_done(peer_device, req);
  390. }
  391. if (s & RQ_NET_PENDING)
  392. set_if_null_req_ack_pending(peer_device, req);
  393. }
  394. if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
  395. atomic_inc(&req->completion_ref);
  396. /* progress: put references */
  397. if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
  398. ++c_put;
  399. if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
  400. D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
  401. /* local completion may still come in later,
  402. * we need to keep the req object around. */
  403. kref_get(&req->kref);
  404. ++c_put;
  405. }
  406. if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
  407. if (req->rq_state & RQ_LOCAL_ABORTED)
  408. ++k_put;
  409. else
  410. ++c_put;
  411. list_del_init(&req->req_pending_local);
  412. }
  413. if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
  414. dec_ap_pending(device);
  415. ++c_put;
  416. req->acked_jif = jiffies;
  417. advance_conn_req_ack_pending(peer_device, req);
  418. }
  419. if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
  420. ++c_put;
  421. advance_conn_req_next(peer_device, req);
  422. }
  423. if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
  424. if (s & RQ_NET_SENT)
  425. atomic_sub(req->i.size >> 9, &device->ap_in_flight);
  426. if (s & RQ_EXP_BARR_ACK)
  427. ++k_put;
  428. req->net_done_jif = jiffies;
  429. /* in ahead/behind mode, or just in case,
  430. * before we finally destroy this request,
  431. * the caching pointers must not reference it anymore */
  432. advance_conn_req_next(peer_device, req);
  433. advance_conn_req_ack_pending(peer_device, req);
  434. advance_conn_req_not_net_done(peer_device, req);
  435. }
  436. /* potentially complete and destroy */
  437. if (k_put || c_put) {
438. /* Completion does its own kref_put. If we are going to
  439. * kref_sub below, we need req to be still around then. */
  440. int at_least = k_put + !!c_put;
  441. int refcount = atomic_read(&req->kref.refcount);
  442. if (refcount < at_least)
  443. drbd_err(device,
  444. "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
  445. s, req->rq_state, refcount, at_least);
  446. }
  447. /* If we made progress, retry conflicting peer requests, if any. */
  448. if (req->i.waiting)
  449. wake_up(&device->misc_wait);
  450. if (c_put)
  451. k_put += drbd_req_put_completion_ref(req, m, c_put);
  452. if (k_put)
  453. kref_sub(&req->kref, k_put, drbd_req_destroy);
  454. }
  455. static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
  456. {
  457. char b[BDEVNAME_SIZE];
  458. if (!__ratelimit(&drbd_ratelimit_state))
  459. return;
  460. drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
  461. (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
  462. (unsigned long long)req->i.sector,
  463. req->i.size >> 9,
  464. bdevname(device->ldev->backing_bdev, b));
  465. }
  466. /* Helper for HANDED_OVER_TO_NETWORK.
  467. * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
  468. * Is it also still "PENDING"?
  469. * --> If so, clear PENDING and set NET_OK below.
  470. * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
  471. * (and we must not set RQ_NET_OK) */
  472. static inline bool is_pending_write_protocol_A(struct drbd_request *req)
  473. {
  474. return (req->rq_state &
  475. (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
  476. == (RQ_WRITE|RQ_NET_PENDING);
  477. }
  478. /* obviously this could be coded as many single functions
  479. * instead of one huge switch,
  480. * or by putting the code directly in the respective locations
  481. * (as it has been before).
  482. *
  483. * but having it this way
  484. * enforces that it is all in this one place, where it is easier to audit,
  485. * it makes it obvious that whatever "event" "happens" to a request should
  486. * happen "atomically" within the req_lock,
  487. * and it enforces that we have to think in a very structured manner
  488. * about the "events" that may happen to a request during its life time ...
  489. */
  490. int __req_mod(struct drbd_request *req, enum drbd_req_event what,
  491. struct bio_and_error *m)
  492. {
  493. struct drbd_device *const device = req->device;
  494. struct drbd_peer_device *const peer_device = first_peer_device(device);
  495. struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
  496. struct net_conf *nc;
  497. int p, rv = 0;
  498. if (m)
  499. m->bio = NULL;
  500. switch (what) {
  501. default:
  502. drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
  503. break;
  504. /* does not happen...
  505. * initialization done in drbd_req_new
  506. case CREATED:
  507. break;
  508. */
  509. case TO_BE_SENT: /* via network */
  510. /* reached via __drbd_make_request
  511. * and from w_read_retry_remote */
  512. D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
  513. rcu_read_lock();
  514. nc = rcu_dereference(connection->net_conf);
  515. p = nc->wire_protocol;
  516. rcu_read_unlock();
  517. req->rq_state |=
  518. p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
  519. p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
  520. mod_rq_state(req, m, 0, RQ_NET_PENDING);
  521. break;
  522. case TO_BE_SUBMITTED: /* locally */
  523. /* reached via __drbd_make_request */
  524. D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
  525. mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
  526. break;
  527. case COMPLETED_OK:
  528. if (req->rq_state & RQ_WRITE)
  529. device->writ_cnt += req->i.size >> 9;
  530. else
  531. device->read_cnt += req->i.size >> 9;
  532. mod_rq_state(req, m, RQ_LOCAL_PENDING,
  533. RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
  534. break;
  535. case ABORT_DISK_IO:
  536. mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
  537. break;
  538. case WRITE_COMPLETED_WITH_ERROR:
  539. drbd_report_io_error(device, req);
  540. __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
  541. mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
  542. break;
  543. case READ_COMPLETED_WITH_ERROR:
  544. drbd_set_out_of_sync(device, req->i.sector, req->i.size);
  545. drbd_report_io_error(device, req);
  546. __drbd_chk_io_error(device, DRBD_READ_ERROR);
  547. /* fall through. */
  548. case READ_AHEAD_COMPLETED_WITH_ERROR:
  549. /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
  550. mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
  551. break;
  552. case DISCARD_COMPLETED_NOTSUPP:
  553. case DISCARD_COMPLETED_WITH_ERROR:
  554. /* I'd rather not detach from local disk just because it
  555. * failed a REQ_DISCARD. */
  556. mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
  557. break;
  558. case QUEUE_FOR_NET_READ:
  559. /* READ or READA, and
  560. * no local disk,
  561. * or target area marked as invalid,
  562. * or just got an io-error. */
  563. /* from __drbd_make_request
  564. * or from bio_endio during read io-error recovery */
  565. /* So we can verify the handle in the answer packet.
  566. * Corresponding drbd_remove_request_interval is in
  567. * drbd_req_complete() */
  568. D_ASSERT(device, drbd_interval_empty(&req->i));
  569. drbd_insert_interval(&device->read_requests, &req->i);
  570. set_bit(UNPLUG_REMOTE, &device->flags);
  571. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  572. D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
  573. mod_rq_state(req, m, 0, RQ_NET_QUEUED);
  574. req->w.cb = w_send_read_req;
  575. drbd_queue_work(&connection->sender_work,
  576. &req->w);
  577. break;
  578. case QUEUE_FOR_NET_WRITE:
  579. /* assert something? */
  580. /* from __drbd_make_request only */
  581. /* Corresponding drbd_remove_request_interval is in
  582. * drbd_req_complete() */
  583. D_ASSERT(device, drbd_interval_empty(&req->i));
  584. drbd_insert_interval(&device->write_requests, &req->i);
  585. /* NOTE
  586. * In case the req ended up on the transfer log before being
  587. * queued on the worker, it could lead to this request being
  588. * missed during cleanup after connection loss.
  589. * So we have to do both operations here,
  590. * within the same lock that protects the transfer log.
  591. *
  592. * _req_add_to_epoch(req); this has to be after the
  593. * _maybe_start_new_epoch(req); which happened in
  594. * __drbd_make_request, because we now may set the bit
  595. * again ourselves to close the current epoch.
  596. *
  597. * Add req to the (now) current epoch (barrier). */
  598. /* otherwise we may lose an unplug, which may cause some remote
  599. * io-scheduler timeout to expire, increasing maximum latency,
  600. * hurting performance. */
  601. set_bit(UNPLUG_REMOTE, &device->flags);
  602. /* queue work item to send data */
  603. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  604. mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
  605. req->w.cb = w_send_dblock;
  606. drbd_queue_work(&connection->sender_work,
  607. &req->w);
  608. /* close the epoch, in case it outgrew the limit */
  609. rcu_read_lock();
  610. nc = rcu_dereference(connection->net_conf);
  611. p = nc->max_epoch_size;
  612. rcu_read_unlock();
  613. if (connection->current_tle_writes >= p)
  614. start_new_tl_epoch(connection);
  615. break;
  616. case QUEUE_FOR_SEND_OOS:
  617. mod_rq_state(req, m, 0, RQ_NET_QUEUED);
  618. req->w.cb = w_send_out_of_sync;
  619. drbd_queue_work(&connection->sender_work,
  620. &req->w);
  621. break;
  622. case READ_RETRY_REMOTE_CANCELED:
  623. case SEND_CANCELED:
  624. case SEND_FAILED:
  625. /* real cleanup will be done from tl_clear. just update flags
  626. * so it is no longer marked as on the worker queue */
  627. mod_rq_state(req, m, RQ_NET_QUEUED, 0);
  628. break;
  629. case HANDED_OVER_TO_NETWORK:
  630. /* assert something? */
  631. if (is_pending_write_protocol_A(req))
  632. /* this is what is dangerous about protocol A:
  633. * pretend it was successfully written on the peer. */
  634. mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
  635. RQ_NET_SENT|RQ_NET_OK);
  636. else
  637. mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
  638. /* It is still not yet RQ_NET_DONE until the
  639. * corresponding epoch barrier got acked as well,
  640. * so we know what to dirty on connection loss. */
  641. break;
  642. case OOS_HANDED_TO_NETWORK:
  643. /* Was not set PENDING, no longer QUEUED, so is now DONE
  644. * as far as this connection is concerned. */
  645. mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
  646. break;
  647. case CONNECTION_LOST_WHILE_PENDING:
  648. /* transfer log cleanup after connection loss */
  649. mod_rq_state(req, m,
  650. RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
  651. RQ_NET_DONE);
  652. break;
  653. case CONFLICT_RESOLVED:
  654. /* for superseded conflicting writes of multiple primaries,
  655. * there is no need to keep anything in the tl, potential
  656. * node crashes are covered by the activity log.
  657. *
  658. * If this request had been marked as RQ_POSTPONED before,
  659. * it will actually not be completed, but "restarted",
  660. * resubmitted from the retry worker context. */
  661. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  662. D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
  663. mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
  664. break;
  665. case WRITE_ACKED_BY_PEER_AND_SIS:
  666. req->rq_state |= RQ_NET_SIS;
  667. case WRITE_ACKED_BY_PEER:
  668. /* Normal operation protocol C: successfully written on peer.
  669. * During resync, even in protocol != C,
  670. * we requested an explicit write ack anyways.
  671. * Which means we cannot even assert anything here.
  672. * Nothing more to do here.
  673. * We want to keep the tl in place for all protocols, to cater
  674. * for volatile write-back caches on lower level devices. */
  675. goto ack_common;
  676. case RECV_ACKED_BY_PEER:
  677. D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
  678. /* protocol B; pretends to be successfully written on peer.
  679. * see also notes above in HANDED_OVER_TO_NETWORK about
  680. * protocol != C */
  681. ack_common:
  682. mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
  683. break;
  684. case POSTPONE_WRITE:
  685. D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
  686. /* If this node has already detected the write conflict, the
  687. * worker will be waiting on misc_wait. Wake it up once this
  688. * request has completed locally.
  689. */
  690. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  691. req->rq_state |= RQ_POSTPONED;
  692. if (req->i.waiting)
  693. wake_up(&device->misc_wait);
  694. /* Do not clear RQ_NET_PENDING. This request will make further
  695. * progress via restart_conflicting_writes() or
  696. * fail_postponed_requests(). Hopefully. */
  697. break;
  698. case NEG_ACKED:
  699. mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
  700. break;
  701. case FAIL_FROZEN_DISK_IO:
  702. if (!(req->rq_state & RQ_LOCAL_COMPLETED))
  703. break;
  704. mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
  705. break;
  706. case RESTART_FROZEN_DISK_IO:
  707. if (!(req->rq_state & RQ_LOCAL_COMPLETED))
  708. break;
  709. mod_rq_state(req, m,
  710. RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
  711. RQ_LOCAL_PENDING);
  712. rv = MR_READ;
  713. if (bio_data_dir(req->master_bio) == WRITE)
  714. rv = MR_WRITE;
  715. get_ldev(device); /* always succeeds in this call path */
  716. req->w.cb = w_restart_disk_io;
  717. drbd_queue_work(&connection->sender_work,
  718. &req->w);
  719. break;
  720. case RESEND:
  721. /* Simply complete (local only) READs. */
  722. if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
  723. mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
  724. break;
  725. }
  726. /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
  727. before the connection loss (B&C only); only P_BARRIER_ACK
  728. (or the local completion?) was missing when we suspended.
  729. Throwing them out of the TL here by pretending we got a BARRIER_ACK.
  730. During connection handshake, we ensure that the peer was not rebooted. */
  731. if (!(req->rq_state & RQ_NET_OK)) {
  732. /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
  733. * in that case we must not set RQ_NET_PENDING. */
  734. mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
  735. if (req->w.cb) {
  736. /* w.cb expected to be w_send_dblock, or w_send_read_req */
  737. drbd_queue_work(&connection->sender_work,
  738. &req->w);
  739. rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
  740. } /* else: FIXME can this happen? */
  741. break;
  742. }
  743. /* else, fall through to BARRIER_ACKED */
  744. case BARRIER_ACKED:
  745. /* barrier ack for READ requests does not make sense */
  746. if (!(req->rq_state & RQ_WRITE))
  747. break;
  748. if (req->rq_state & RQ_NET_PENDING) {
  749. /* barrier came in before all requests were acked.
  750. * this is bad, because if the connection is lost now,
  751. * we won't be able to clean them up... */
  752. drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
  753. }
  754. /* Allowed to complete requests, even while suspended.
  755. * As this is called for all requests within a matching epoch,
  756. * we need to filter, and only set RQ_NET_DONE for those that
  757. * have actually been on the wire. */
  758. mod_rq_state(req, m, RQ_COMPLETION_SUSP,
  759. (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
  760. break;
  761. case DATA_RECEIVED:
  762. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  763. mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
  764. break;
  765. case QUEUE_AS_DRBD_BARRIER:
  766. start_new_tl_epoch(connection);
  767. mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
  768. break;
  769. };
  770. return rv;
  771. }
  772. /* we may do a local read if:
  773. * - we are consistent (of course),
  774. * - or we are generally inconsistent,
  775. * BUT we are still/already IN SYNC for this area.
  776. * since size may be bigger than BM_BLOCK_SIZE,
  777. * we may need to check several bits.
  778. */
  779. static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
  780. {
  781. unsigned long sbnr, ebnr;
  782. sector_t esector, nr_sectors;
  783. if (device->state.disk == D_UP_TO_DATE)
  784. return true;
  785. if (device->state.disk != D_INCONSISTENT)
  786. return false;
  787. esector = sector + (size >> 9) - 1;
  788. nr_sectors = drbd_get_capacity(device->this_bdev);
  789. D_ASSERT(device, sector < nr_sectors);
  790. D_ASSERT(device, esector < nr_sectors);
  791. sbnr = BM_SECT_TO_BIT(sector);
  792. ebnr = BM_SECT_TO_BIT(esector);
  793. return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
  794. }
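/* Decide whether this read should be served by the peer, according to the
 * configured read-balancing policy.  For the fixed-size striping policies
 * the stripe size determines which address bit alternates between the two
 * nodes: e.g. RB_64K_STRIPING uses stripe_shift = 16, so bit 7 of the
 * 512-byte sector number toggles local vs. remote in 64 KiB stripes. */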
  795. static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
  796. enum drbd_read_balancing rbm)
  797. {
  798. struct backing_dev_info *bdi;
  799. int stripe_shift;
  800. switch (rbm) {
  801. case RB_CONGESTED_REMOTE:
  802. bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
  803. return bdi_read_congested(bdi);
  804. case RB_LEAST_PENDING:
  805. return atomic_read(&device->local_cnt) >
  806. atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
  807. case RB_32K_STRIPING: /* stripe_shift = 15 */
  808. case RB_64K_STRIPING:
  809. case RB_128K_STRIPING:
  810. case RB_256K_STRIPING:
  811. case RB_512K_STRIPING:
  812. case RB_1M_STRIPING: /* stripe_shift = 20 */
  813. stripe_shift = (rbm - RB_32K_STRIPING + 15);
  814. return (sector >> (stripe_shift - 9)) & 1;
  815. case RB_ROUND_ROBIN:
  816. return test_and_change_bit(READ_BALANCE_RR, &device->flags);
  817. case RB_PREFER_REMOTE:
  818. return true;
  819. case RB_PREFER_LOCAL:
  820. default:
  821. return false;
  822. }
  823. }
  824. /*
  825. * complete_conflicting_writes - wait for any conflicting write requests
  826. *
  827. * The write_requests tree contains all active write requests which we
  828. * currently know about. Wait for any requests to complete which conflict with
  829. * the new one.
  830. *
  831. * Only way out: remove the conflicting intervals from the tree.
  832. */
  833. static void complete_conflicting_writes(struct drbd_request *req)
  834. {
  835. DEFINE_WAIT(wait);
  836. struct drbd_device *device = req->device;
  837. struct drbd_interval *i;
  838. sector_t sector = req->i.sector;
  839. int size = req->i.size;
  840. i = drbd_find_overlap(&device->write_requests, sector, size);
  841. if (!i)
  842. return;
  843. for (;;) {
  844. prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
  845. i = drbd_find_overlap(&device->write_requests, sector, size);
  846. if (!i)
  847. break;
  848. /* Indicate to wake up device->misc_wait on progress. */
  849. i->waiting = true;
  850. spin_unlock_irq(&device->resource->req_lock);
  851. schedule();
  852. spin_lock_irq(&device->resource->req_lock);
  853. }
  854. finish_wait(&device->misc_wait, &wait);
  855. }
  856. /* called within req_lock and rcu_read_lock() */
  857. static void maybe_pull_ahead(struct drbd_device *device)
  858. {
  859. struct drbd_connection *connection = first_peer_device(device)->connection;
  860. struct net_conf *nc;
  861. bool congested = false;
  862. enum drbd_on_congestion on_congestion;
  863. rcu_read_lock();
  864. nc = rcu_dereference(connection->net_conf);
  865. on_congestion = nc ? nc->on_congestion : OC_BLOCK;
  866. rcu_read_unlock();
  867. if (on_congestion == OC_BLOCK ||
  868. connection->agreed_pro_version < 96)
  869. return;
  870. if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
  871. return; /* nothing to do ... */
  872. /* If I don't even have good local storage, we can not reasonably try
  873. * to pull ahead of the peer. We also need the local reference to make
  874. * sure device->act_log is there.
  875. */
  876. if (!get_ldev_if_state(device, D_UP_TO_DATE))
  877. return;
  878. if (nc->cong_fill &&
  879. atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
  880. drbd_info(device, "Congestion-fill threshold reached\n");
  881. congested = true;
  882. }
  883. if (device->act_log->used >= nc->cong_extents) {
  884. drbd_info(device, "Congestion-extents threshold reached\n");
  885. congested = true;
  886. }
  887. if (congested) {
  888. /* start a new epoch for non-mirrored writes */
  889. start_new_tl_epoch(first_peer_device(device)->connection);
  890. if (on_congestion == OC_PULL_AHEAD)
  891. _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
  892. else /*nc->on_congestion == OC_DISCONNECT */
  893. _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
  894. }
  895. put_ldev(device);
  896. }
  897. /* If this returns false, and req->private_bio is still set,
  898. * this should be submitted locally.
  899. *
  900. * If it returns false, but req->private_bio is not set,
  901. * we do not have access to good data :(
  902. *
  903. * Otherwise, this destroys req->private_bio, if any,
  904. * and returns true.
  905. */
  906. static bool do_remote_read(struct drbd_request *req)
  907. {
  908. struct drbd_device *device = req->device;
  909. enum drbd_read_balancing rbm;
  910. if (req->private_bio) {
  911. if (!drbd_may_do_local_read(device,
  912. req->i.sector, req->i.size)) {
  913. bio_put(req->private_bio);
  914. req->private_bio = NULL;
  915. put_ldev(device);
  916. }
  917. }
  918. if (device->state.pdsk != D_UP_TO_DATE)
  919. return false;
  920. if (req->private_bio == NULL)
  921. return true;
  922. /* TODO: improve read balancing decisions, take into account drbd
  923. * protocol, pending requests etc. */
  924. rcu_read_lock();
  925. rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
  926. rcu_read_unlock();
  927. if (rbm == RB_PREFER_LOCAL && req->private_bio)
  928. return false; /* submit locally */
  929. if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
  930. if (req->private_bio) {
  931. bio_put(req->private_bio);
  932. req->private_bio = NULL;
  933. put_ldev(device);
  934. }
  935. return true;
  936. }
  937. return false;
  938. }
  939. /* returns number of connections (== 1, for drbd 8.4)
  940. * expected to actually write this data,
  941. * which does NOT include those that we are L_AHEAD for. */
  942. static int drbd_process_write_request(struct drbd_request *req)
  943. {
  944. struct drbd_device *device = req->device;
  945. int remote, send_oos;
  946. remote = drbd_should_do_remote(device->state);
  947. send_oos = drbd_should_send_out_of_sync(device->state);
  948. /* Need to replicate writes. Unless it is an empty flush,
  949. * which is better mapped to a DRBD P_BARRIER packet,
  950. * also for drbd wire protocol compatibility reasons.
  951. * If this was a flush, just start a new epoch.
  952. * Unless the current epoch was empty anyways, or we are not currently
  953. * replicating, in which case there is no point. */
  954. if (unlikely(req->i.size == 0)) {
  955. /* The only size==0 bios we expect are empty flushes. */
  956. D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
  957. if (remote)
  958. _req_mod(req, QUEUE_AS_DRBD_BARRIER);
  959. return remote;
  960. }
  961. if (!remote && !send_oos)
  962. return 0;
  963. D_ASSERT(device, !(remote && send_oos));
  964. if (remote) {
  965. _req_mod(req, TO_BE_SENT);
  966. _req_mod(req, QUEUE_FOR_NET_WRITE);
  967. } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
  968. _req_mod(req, QUEUE_FOR_SEND_OOS);
  969. return remote;
  970. }
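/* Submit the private bio to the local backing device, unless we lost the
 * local disk in the meantime or fault injection asks us to fail it. */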
  971. static void
  972. drbd_submit_req_private_bio(struct drbd_request *req)
  973. {
  974. struct drbd_device *device = req->device;
  975. struct bio *bio = req->private_bio;
  976. const int rw = bio_rw(bio);
  977. bio->bi_bdev = device->ldev->backing_bdev;
  978. /* State may have changed since we grabbed our reference on the
  979. * ->ldev member. Double check, and short-circuit to endio.
  980. * In case the last activity log transaction failed to get on
  981. * stable storage, and this is a WRITE, we may not even submit
  982. * this bio. */
  983. if (get_ldev(device)) {
  984. req->pre_submit_jif = jiffies;
  985. if (drbd_insert_fault(device,
  986. rw == WRITE ? DRBD_FAULT_DT_WR
  987. : rw == READ ? DRBD_FAULT_DT_RD
  988. : DRBD_FAULT_DT_RA))
  989. bio_endio(bio, -EIO);
  990. else
  991. generic_make_request(bio);
  992. put_ldev(device);
  993. } else
  994. bio_endio(bio, -EIO);
  995. }
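/* Hand a write that could not take the activity-log fast path over to the
 * submitter work queue; do_submit() will get the AL reference (possibly
 * blocking for a transaction commit) and then send and submit it. */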
  996. static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
  997. {
  998. spin_lock_irq(&device->resource->req_lock);
  999. list_add_tail(&req->tl_requests, &device->submit.writes);
  1000. list_add_tail(&req->req_pending_master_completion,
  1001. &device->pending_master_completion[1 /* WRITE */]);
  1002. spin_unlock_irq(&device->resource->req_lock);
  1003. queue_work(device->submit.wq, &device->submit.worker);
  1004. /* do_submit() may sleep internally on al_wait, too */
  1005. wake_up(&device->al_wait);
  1006. }
  1007. /* returns the new drbd_request pointer, if the caller is expected to
  1008. * drbd_send_and_submit() it (to save latency), or NULL if we queued the
  1009. * request on the submitter thread.
  1010. * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
  1011. */
  1012. static struct drbd_request *
  1013. drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
  1014. {
  1015. const int rw = bio_data_dir(bio);
  1016. struct drbd_request *req;
  1017. /* allocate outside of all locks; */
  1018. req = drbd_req_new(device, bio);
  1019. if (!req) {
  1020. dec_ap_bio(device);
  1021. /* only pass the error to the upper layers.
  1022. * if user cannot handle io errors, that's not our business. */
  1023. drbd_err(device, "could not kmalloc() req\n");
  1024. bio_endio(bio, -ENOMEM);
  1025. return ERR_PTR(-ENOMEM);
  1026. }
  1027. req->start_jif = start_jif;
  1028. if (!get_ldev(device)) {
  1029. bio_put(req->private_bio);
  1030. req->private_bio = NULL;
  1031. }
  1032. /* Update disk stats */
  1033. _drbd_start_io_acct(device, req);
  1034. if (rw == WRITE && req->private_bio && req->i.size
  1035. && !test_bit(AL_SUSPENDED, &device->flags)) {
  1036. if (!drbd_al_begin_io_fastpath(device, &req->i)) {
  1037. atomic_inc(&device->ap_actlog_cnt);
  1038. drbd_queue_write(device, req);
  1039. return NULL;
  1040. }
  1041. req->rq_state |= RQ_IN_ACT_LOG;
  1042. req->in_actlog_jif = jiffies;
  1043. }
  1044. return req;
  1045. }
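/* Under the resource req_lock: decide local vs. remote (or both), register
 * the request in the transfer log and on the pending lists, and queue any
 * network work.  The private bio, if any, is submitted only after the lock
 * has been dropped again. */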
  1046. static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
  1047. {
  1048. struct drbd_resource *resource = device->resource;
  1049. const int rw = bio_rw(req->master_bio);
  1050. struct bio_and_error m = { NULL, };
  1051. bool no_remote = false;
  1052. bool submit_private_bio = false;
  1053. spin_lock_irq(&resource->req_lock);
  1054. if (rw == WRITE) {
  1055. /* This may temporarily give up the req_lock,
1056. * but will re-acquire it before it returns here.
  1057. * Needs to be before the check on drbd_suspended() */
  1058. complete_conflicting_writes(req);
  1059. /* no more giving up req_lock from now on! */
  1060. /* check for congestion, and potentially stop sending
  1061. * full data updates, but start sending "dirty bits" only. */
  1062. maybe_pull_ahead(device);
  1063. }
  1064. if (drbd_suspended(device)) {
  1065. /* push back and retry: */
  1066. req->rq_state |= RQ_POSTPONED;
  1067. if (req->private_bio) {
  1068. bio_put(req->private_bio);
  1069. req->private_bio = NULL;
  1070. put_ldev(device);
  1071. }
  1072. goto out;
  1073. }
  1074. /* We fail READ/READA early, if we can not serve it.
  1075. * We must do this before req is registered on any lists.
  1076. * Otherwise, drbd_req_complete() will queue failed READ for retry. */
  1077. if (rw != WRITE) {
  1078. if (!do_remote_read(req) && !req->private_bio)
  1079. goto nodata;
  1080. }
  1081. /* which transfer log epoch does this belong to? */
  1082. req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
  1083. /* no point in adding empty flushes to the transfer log,
  1084. * they are mapped to drbd barriers already. */
  1085. if (likely(req->i.size!=0)) {
  1086. if (rw == WRITE)
  1087. first_peer_device(device)->connection->current_tle_writes++;
  1088. list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
  1089. }
  1090. if (rw == WRITE) {
  1091. if (!drbd_process_write_request(req))
  1092. no_remote = true;
  1093. } else {
  1094. /* We either have a private_bio, or we can read from remote.
  1095. * Otherwise we had done the goto nodata above. */
  1096. if (req->private_bio == NULL) {
  1097. _req_mod(req, TO_BE_SENT);
  1098. _req_mod(req, QUEUE_FOR_NET_READ);
  1099. } else
  1100. no_remote = true;
  1101. }
  1102. /* If it took the fast path in drbd_request_prepare, add it here.
  1103. * The slow path has added it already. */
  1104. if (list_empty(&req->req_pending_master_completion))
  1105. list_add_tail(&req->req_pending_master_completion,
  1106. &device->pending_master_completion[rw == WRITE]);
  1107. if (req->private_bio) {
  1108. /* needs to be marked within the same spinlock */
  1109. list_add_tail(&req->req_pending_local,
  1110. &device->pending_completion[rw == WRITE]);
  1111. _req_mod(req, TO_BE_SUBMITTED);
  1112. /* but we need to give up the spinlock to submit */
  1113. submit_private_bio = true;
  1114. } else if (no_remote) {
  1115. nodata:
  1116. if (__ratelimit(&drbd_ratelimit_state))
  1117. drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
  1118. (unsigned long long)req->i.sector, req->i.size >> 9);
  1119. /* A write may have been queued for send_oos, however.
  1120. * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
  1121. }
  1122. out:
  1123. if (drbd_req_put_completion_ref(req, &m, 1))
  1124. kref_put(&req->kref, drbd_req_destroy);
  1125. spin_unlock_irq(&resource->req_lock);
  1126. /* Even though above is a kref_put(), this is safe.
  1127. * As long as we still need to submit our private bio,
  1128. * we hold a completion ref, and the request cannot disappear.
  1129. * If however this request did not even have a private bio to submit
  1130. * (e.g. remote read), req may already be invalid now.
  1131. * That's why we cannot check on req->private_bio. */
  1132. if (submit_private_bio)
  1133. drbd_submit_req_private_bio(req);
  1134. if (m.bio)
  1135. complete_master_bio(device, &m);
  1136. }
  1137. void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
  1138. {
  1139. struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
  1140. if (IS_ERR_OR_NULL(req))
  1141. return;
  1142. drbd_send_and_submit(device, req);
  1143. }
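/* Send and submit everything on the incoming list that either needs no
 * activity-log reference or can get one via the non-blocking fast path;
 * writes whose AL extent is not active yet are left on the list for the
 * transaction-based slow path in do_submit(). */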
  1144. static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
  1145. {
  1146. struct drbd_request *req, *tmp;
  1147. list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
  1148. const int rw = bio_data_dir(req->master_bio);
  1149. if (rw == WRITE /* rw != WRITE should not even end up here! */
  1150. && req->private_bio && req->i.size
  1151. && !test_bit(AL_SUSPENDED, &device->flags)) {
  1152. if (!drbd_al_begin_io_fastpath(device, &req->i))
  1153. continue;
  1154. req->rq_state |= RQ_IN_ACT_LOG;
  1155. req->in_actlog_jif = jiffies;
  1156. atomic_dec(&device->ap_actlog_cnt);
  1157. }
  1158. list_del_init(&req->tl_requests);
  1159. drbd_send_and_submit(device, req);
  1160. }
  1161. }
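/* Non-blocking: try to activate the AL extents for all requests on
 * "incoming".  Requests that got their slot move to "pending" (to be
 * submitted after the next transaction commit); requests that cannot get
 * one right now (e.g. blocked by resync) move to "later".  Stops early
 * when the current transaction cannot take any more extents (-ENOBUFS).
 * Returns true if anything became pending. */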
  1162. static bool prepare_al_transaction_nonblock(struct drbd_device *device,
  1163. struct list_head *incoming,
  1164. struct list_head *pending,
  1165. struct list_head *later)
  1166. {
  1167. struct drbd_request *req, *tmp;
  1168. int wake = 0;
  1169. int err;
  1170. spin_lock_irq(&device->al_lock);
  1171. list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
  1172. err = drbd_al_begin_io_nonblock(device, &req->i);
  1173. if (err == -ENOBUFS)
  1174. break;
  1175. if (err == -EBUSY)
  1176. wake = 1;
  1177. if (err)
  1178. list_move_tail(&req->tl_requests, later);
  1179. else
  1180. list_move_tail(&req->tl_requests, pending);
  1181. }
  1182. spin_unlock_irq(&device->al_lock);
  1183. if (wake)
  1184. wake_up(&device->al_wait);
  1185. return !list_empty(pending);
  1186. }
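/* Called after the AL transaction has been committed: the requests on
 * "pending" now have their activity-log reference, so mark them
 * accordingly and send/submit them. */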
  1187. void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
  1188. {
  1189. struct drbd_request *req, *tmp;
  1190. list_for_each_entry_safe(req, tmp, pending, tl_requests) {
  1191. req->rq_state |= RQ_IN_ACT_LOG;
  1192. req->in_actlog_jif = jiffies;
  1193. atomic_dec(&device->ap_actlog_cnt);
  1194. list_del_init(&req->tl_requests);
  1195. drbd_send_and_submit(device, req);
  1196. }
  1197. }
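/* Submitter work: batch the writes queued by drbd_queue_write() into
 * activity-log transactions.  Take the non-blocking fast path where
 * possible, otherwise collect requests into a transaction, commit it, and
 * submit whatever became pending; repeat until the incoming list is empty. */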
  1198. void do_submit(struct work_struct *ws)
  1199. {
  1200. struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
  1201. LIST_HEAD(incoming); /* from drbd_make_request() */
  1202. LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */
  1203. LIST_HEAD(busy); /* blocked by resync requests */
  1204. /* grab new incoming requests */
  1205. spin_lock_irq(&device->resource->req_lock);
  1206. list_splice_tail_init(&device->submit.writes, &incoming);
  1207. spin_unlock_irq(&device->resource->req_lock);
  1208. for (;;) {
  1209. DEFINE_WAIT(wait);
  1210. /* move used-to-be-busy back to front of incoming */
  1211. list_splice_init(&busy, &incoming);
  1212. submit_fast_path(device, &incoming);
  1213. if (list_empty(&incoming))
  1214. break;
		for (;;) {
			prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

			list_splice_init(&busy, &incoming);
			prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
			if (!list_empty(&pending))
				break;

			schedule();

			/* If all currently "hot" activity log extents are kept busy by
			 * incoming requests, we still must not totally starve new
			 * requests to "cold" extents.
			 * Something left on &incoming means there had not been
			 * enough update slots available, and the activity log
			 * has been marked as "starving".
			 *
			 * Try again now, without looking for new requests,
			 * effectively blocking all new requests until we made
			 * at least _some_ progress with what we currently have.
			 */
			if (!list_empty(&incoming))
				continue;

			/* Nothing moved to pending, but nothing left
			 * on incoming: all moved to busy!
			 * Grab new and iterate. */
			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &incoming);
			spin_unlock_irq(&device->resource->req_lock);
		}
		finish_wait(&device->al_wait, &wait);

		/* If the transaction was full, before all incoming requests
		 * had been processed, skip ahead to commit, and iterate
		 * without splicing in more incoming requests from upper layers.
		 *
		 * Else, if all incoming have been processed,
		 * they have become either "pending" (to be submitted after
		 * next transaction commit) or "busy" (blocked by resync).
		 *
		 * Maybe more was queued, while we prepared the transaction?
		 * Try to stuff those into this transaction as well.
		 * Be strictly non-blocking here,
		 * we already have something to commit.
		 *
		 * Commit if we don't make any more progress.
		 */
		while (list_empty(&incoming)) {
			LIST_HEAD(more_pending);
			LIST_HEAD(more_incoming);
			bool made_progress;

			/* It is ok to look outside the lock,
			 * it's only an optimization anyways */
			if (list_empty(&device->submit.writes))
				break;

			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &more_incoming);
			spin_unlock_irq(&device->resource->req_lock);

			if (list_empty(&more_incoming))
				break;

			made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

			list_splice_tail_init(&more_pending, &pending);
			list_splice_tail_init(&more_incoming, &incoming);
			if (!made_progress)
				break;
		}

		drbd_al_begin_io_commit(device);
		send_and_submit_pending(device, &pending);
	}
}
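
/* Entry point for bios submitted to the DRBD device: account the bio
 * and pass it on to __drbd_make_request() together with its start time. */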
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_device *device = (struct drbd_device *) q->queuedata;
	unsigned long start_jif;

	start_jif = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

	inc_ap_bio(device);
	__drbd_make_request(device, bio, start_jif);
}

/* This is called by bio_add_page().
 *
 * q->max_hw_sectors and other global limits are already enforced there.
 *
 * We need to call down to our lower level device,
 * in case it has special restrictions.
 *
 * We also may need to enforce configured max-bio-bvecs limits.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset, so no need to ask lower levels.
 */
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	struct drbd_device *device = (struct drbd_device *) q->queuedata;
	unsigned int bio_size = bvm->bi_size;
	int limit = DRBD_MAX_BIO_SIZE;
	int backing_limit;

	if (bio_size && get_ldev(device)) {
		unsigned int max_hw_sectors = queue_max_hw_sectors(q);
		struct request_queue * const b =
			device->ldev->backing_bdev->bd_disk->queue;
		if (b->merge_bvec_fn) {
			bvm->bi_bdev = device->ldev->backing_bdev;
			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
			limit = min(limit, backing_limit);
		}
		put_ldev(device);
		if ((limit >> 9) > max_hw_sectors)
			limit = max_hw_sectors << 9;
	}
	return limit;
}
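
/* Recurring per-device timer: check whether the oldest request pending
 * on the peer (ko-count * timeout) or on the local backing device
 * (disk-timeout) has been outstanding for too long, take the configured
 * action if so, and re-arm for the nearest remaining deadline. */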
void request_timer_fn(unsigned long data)
{
	struct drbd_device *device = (struct drbd_device *) data;
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
	struct net_conf *nc;
	unsigned long oldest_submit_jif;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
		ent = nc->timeout * HZ/10 * nc->ko_count;

	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(device);
	}
	rcu_read_unlock();

	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;
	nt = now + et;

	spin_lock_irq(&device->resource->req_lock);
	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
	req_peer = connection->req_not_net_done;
	/* maybe the oldest request waiting for the peer is in fact still
	 * blocking in tcp sendmsg */
	if (!req_peer && connection->req_next && connection->req_next->pre_send_jif)
		req_peer = connection->req_next;

	/* evaluate the oldest peer request only in one timer! */
	if (req_peer && req_peer->device != device)
		req_peer = NULL;

	/* do we have something to evaluate? */
	if (req_peer == NULL && req_write == NULL && req_read == NULL)
		goto out;
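
	/* Pick the older of the two oldest pending local completions
	 * (read vs. write); if neither exists, use "now", which disables
	 * the disk-timeout check below. */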
	oldest_submit_jif =
		(req_write && req_read)
		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
		: req_write ? req_write->pre_submit_jif
		: req_read ? req_read->pre_submit_jif : now;

	/* The request is considered timed out, if
	 * - we have some effective timeout from the configuration,
	 *   with the above state restrictions applied,
	 * - the oldest request is waiting for a response from the network
	 *   resp. the local disk,
	 * - the oldest request is in fact older than the effective timeout,
	 * - the connection was established (resp. disk was attached)
	 *   for longer than the timeout already.
	 * Note that for 32bit jiffies and very stable connections/disks,
	 * we may have a wrap around, which is caught by
	 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
	 *
	 * Side effect: once per 32bit wrap-around interval, which means every
	 * ~198 days with 250 HZ, we have a window where the timeout would need
	 * to expire twice (worst case) to become effective. Good enough.
	 */
	if (ent && req_peer &&
	    time_after(now, req_peer->pre_send_jif + ent) &&
	    !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
		drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
	}
	if (dt && oldest_submit_jif != now &&
	    time_after(now, oldest_submit_jif + dt) &&
	    !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
	}

	/* Reschedule timer for the nearest not already expired timeout.
	 * Fall back to now + min(effective network timeout, disk timeout). */
	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
		? req_peer->pre_send_jif + ent : now + et;
	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
		? oldest_submit_jif + dt : now + et;
	nt = time_before(ent, dt) ? ent : dt;
out:
	spin_unlock_irq(&device->resource->req_lock);
	mod_timer(&device->request_timer, nt);
}