  1. /*
  2. drbd_req.c
  3. This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
  4. Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  5. Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  6. Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
  7. drbd is free software; you can redistribute it and/or modify
  8. it under the terms of the GNU General Public License as published by
  9. the Free Software Foundation; either version 2, or (at your option)
  10. any later version.
  11. drbd is distributed in the hope that it will be useful,
  12. but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. GNU General Public License for more details.
  15. You should have received a copy of the GNU General Public License
  16. along with drbd; see the file COPYING. If not, write to
  17. the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  18. */
  19. #include <linux/module.h>
  20. #include <linux/slab.h>
  21. #include <linux/drbd.h>
  22. #include "drbd_int.h"
  23. #include "drbd_req.h"
  24. static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
  25. /* Update disk stats at start of I/O request */
  26. static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
  27. {
  28. generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
  29. &device->vdisk->part0);
  30. }
  31. /* Update disk stats when completing request upwards */
  32. static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
  33. {
  34. generic_end_io_acct(bio_data_dir(req->master_bio),
  35. &device->vdisk->part0, req->start_jif);
  36. }
  37. static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
  38. {
  39. struct drbd_request *req;
  40. req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
  41. if (!req)
  42. return NULL;
  43. memset(req, 0, sizeof(*req));
  44. drbd_req_make_private_bio(req, bio_src);
  45. req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
  46. | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
  47. | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
  48. req->device = device;
  49. req->master_bio = bio_src;
  50. req->epoch = 0;
  51. drbd_clear_interval(&req->i);
  52. req->i.sector = bio_src->bi_iter.bi_sector;
  53. req->i.size = bio_src->bi_iter.bi_size;
  54. req->i.local = true;
  55. req->i.waiting = false;
  56. INIT_LIST_HEAD(&req->tl_requests);
  57. INIT_LIST_HEAD(&req->w.list);
  58. INIT_LIST_HEAD(&req->req_pending_master_completion);
  59. INIT_LIST_HEAD(&req->req_pending_local);
  60. /* one reference to be put by __drbd_make_request */
  61. atomic_set(&req->completion_ref, 1);
  62. /* one kref as long as completion_ref > 0 */
  63. kref_init(&req->kref);
  64. return req;
  65. }
  66. static void drbd_remove_request_interval(struct rb_root *root,
  67. struct drbd_request *req)
  68. {
  69. struct drbd_device *device = req->device;
  70. struct drbd_interval *i = &req->i;
  71. drbd_remove_interval(root, i);
  72. /* Wake up any processes waiting for this request to complete. */
  73. if (i->waiting)
  74. wake_up(&device->misc_wait);
  75. }
  76. void drbd_req_destroy(struct kref *kref)
  77. {
  78. struct drbd_request *req = container_of(kref, struct drbd_request, kref);
  79. struct drbd_device *device = req->device;
  80. const unsigned s = req->rq_state;
  81. if ((req->master_bio && !(s & RQ_POSTPONED)) ||
  82. atomic_read(&req->completion_ref) ||
  83. (s & RQ_LOCAL_PENDING) ||
  84. ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
  85. drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
  86. s, atomic_read(&req->completion_ref));
  87. return;
  88. }
  89. /* If called from mod_rq_state (expected normal case) or
  90. * drbd_send_and_submit (the less likely normal path), this holds the
  91. * req_lock, and req->tl_requests will typically be on ->transfer_log,
  92. * though it may be still empty (never added to the transfer log).
  93. *
  94. * If called from do_retry(), we do NOT hold the req_lock, but we are
  95. * still allowed to unconditionally list_del(&req->tl_requests),
  96. * because it will be on a local on-stack list only. */
  97. list_del_init(&req->tl_requests);
  98. /* finally remove the request from the conflict detection
  99. * respective block_id verification interval tree. */
  100. if (!drbd_interval_empty(&req->i)) {
  101. struct rb_root *root;
  102. if (s & RQ_WRITE)
  103. root = &device->write_requests;
  104. else
  105. root = &device->read_requests;
  106. drbd_remove_request_interval(root, req);
  107. } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
  108. drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
  109. s, (unsigned long long)req->i.sector, req->i.size);
  110. /* if it was a write, we may have to set the corresponding
  111. * bit(s) out-of-sync first. If it had a local part, we need to
  112. * release the reference to the activity log. */
  113. if (s & RQ_WRITE) {
  114. /* Set out-of-sync unless both OK flags are set
  115. * (local only or remote failed).
  116. * Other places where we set out-of-sync:
  117. * READ with local io-error */
  118. /* There is a special case:
  119. * we may notice late that IO was suspended,
  120. * and postpone, or schedule for retry, a write,
  121. * before it even was submitted or sent.
  122. * In that case we do not want to touch the bitmap at all.
  123. */
  124. if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
  125. if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
  126. drbd_set_out_of_sync(device, req->i.sector, req->i.size);
  127. if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
  128. drbd_set_in_sync(device, req->i.sector, req->i.size);
  129. }
  130. /* one might be tempted to move the drbd_al_complete_io
  131. * to the local io completion callback drbd_request_endio.
  132. * but, if this was a mirror write, we may only
  133. * drbd_al_complete_io after this is RQ_NET_DONE,
  134. * otherwise the extent could be dropped from the al
  135. * before it has actually been written on the peer.
  136. * if we crash before our peer knows about the request,
  137. * but after the extent has been dropped from the al,
  138. * we would forget to resync the corresponding extent.
  139. */
  140. if (s & RQ_IN_ACT_LOG) {
  141. if (get_ldev_if_state(device, D_FAILED)) {
  142. drbd_al_complete_io(device, &req->i);
  143. put_ldev(device);
  144. } else if (__ratelimit(&drbd_ratelimit_state)) {
  145. drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
  146. "but my Disk seems to have failed :(\n",
  147. (unsigned long long) req->i.sector, req->i.size);
  148. }
  149. }
  150. }
  151. mempool_free(req, drbd_request_mempool);
  152. }
  153. static void wake_all_senders(struct drbd_connection *connection)
  154. {
  155. wake_up(&connection->sender_work.q_wait);
  156. }
  157. /* must hold resource->req_lock */
  158. void start_new_tl_epoch(struct drbd_connection *connection)
  159. {
  160. /* no point closing an epoch, if it is empty, anyways. */
  161. if (connection->current_tle_writes == 0)
  162. return;
  163. connection->current_tle_writes = 0;
  164. atomic_inc(&connection->current_tle_nr);
  165. wake_all_senders(connection);
  166. }
  167. void complete_master_bio(struct drbd_device *device,
  168. struct bio_and_error *m)
  169. {
  170. m->bio->bi_error = m->error;
  171. bio_endio(m->bio);
  172. dec_ap_bio(device);
  173. }
  174. /* Helper for __req_mod().
  175. * Set m->bio to the master bio, if it is fit to be completed,
  176. * or leave it alone (it is initialized to NULL in __req_mod),
  177. * if it has already been completed, or cannot be completed yet.
  178. * If m->bio is set, the error status to be returned is placed in m->error.
  179. */
  180. static
  181. void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
  182. {
  183. const unsigned s = req->rq_state;
  184. struct drbd_device *device = req->device;
  185. int error, ok;
  186. /* we must not complete the master bio, while it is
  187. * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
  188. * not yet acknowledged by the peer
  189. * not yet completed by the local io subsystem
  190. * these flags may get cleared in any order by
  191. * the worker,
  192. * the receiver,
  193. * the bio_endio completion callbacks.
  194. */
  195. if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
  196. (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
  197. (s & RQ_COMPLETION_SUSP)) {
  198. drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
  199. return;
  200. }
  201. if (!req->master_bio) {
  202. drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
  203. return;
  204. }
  205. /*
  206. * figure out whether to report success or failure.
  207. *
  208. * report success when at least one of the operations succeeded.
  209. * or, to put the other way,
  210. * only report failure, when both operations failed.
  211. *
  212. * what to do about the failures is handled elsewhere.
  213. * what we need to do here is just: complete the master_bio.
  214. *
  215. * local completion error, if any, has been stored as ERR_PTR
  216. * in private_bio within drbd_request_endio.
  217. */
  218. ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
  219. error = PTR_ERR(req->private_bio);
  220. /* Before we can signal completion to the upper layers,
  221. * we may need to close the current transfer log epoch.
  222. * We are within the request lock, so we can simply compare
  223. * the request epoch number with the current transfer log
  224. * epoch number. If they match, increase the current_tle_nr,
  225. * and reset the transfer log epoch write_cnt.
  226. */
  227. if (op_is_write(bio_op(req->master_bio)) &&
  228. req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
  229. start_new_tl_epoch(first_peer_device(device)->connection);
  230. /* Update disk stats */
  231. _drbd_end_io_acct(device, req);
  232. /* If READ failed,
  233. * have it be pushed back to the retry work queue,
  234. * so it will re-enter __drbd_make_request(),
  235. * and be re-assigned to a suitable local or remote path,
  236. * or failed if we do not have access to good data anymore.
  237. *
  238. * Unless it was failed early by __drbd_make_request(),
  239. * because no path was available, in which case
  240. * it was not even added to the transfer_log.
  241. *
  242. * read-ahead may fail, and will not be retried.
  243. *
  244. * WRITE should have used all available paths already.
  245. */
  246. if (!ok &&
  247. bio_op(req->master_bio) == REQ_OP_READ &&
  248. !(req->master_bio->bi_opf & REQ_RAHEAD) &&
  249. !list_empty(&req->tl_requests))
  250. req->rq_state |= RQ_POSTPONED;
  251. if (!(req->rq_state & RQ_POSTPONED)) {
  252. m->error = ok ? 0 : (error ?: -EIO);
  253. m->bio = req->master_bio;
  254. req->master_bio = NULL;
  255. /* We leave it in the tree, to be able to verify later
  256. * write-acks in protocol != C during resync.
  257. * But we mark it as "complete", so it won't be counted as
  258. * conflict in a multi-primary setup. */
  259. req->i.completed = true;
  260. }
  261. if (req->i.waiting)
  262. wake_up(&device->misc_wait);
  263. /* Either we are about to complete to upper layers,
  264. * or we will restart this request.
  265. * In either case, the request object will be destroyed soon,
  266. * so better remove it from all lists. */
  267. list_del_init(&req->req_pending_master_completion);
  268. }
  269. /* still holds resource->req_lock */
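/* Returns nonzero when the last completion reference was dropped here and the
 * request was actually completed (not postponed for retry); the caller is then
 * expected to also drop the kref that was held while completion_ref > 0, see
 * drbd_req_new() and the kref_sub()/kref_put() in the callers below. */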
  270. static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
  271. {
  272. struct drbd_device *device = req->device;
  273. D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
  274. if (!atomic_sub_and_test(put, &req->completion_ref))
  275. return 0;
  276. drbd_req_complete(req, m);
  277. if (req->rq_state & RQ_POSTPONED) {
  278. /* don't destroy the req object just yet,
  279. * but queue it for retry */
  280. drbd_restart_request(req);
  281. return 0;
  282. }
  283. return 1;
  284. }
  285. static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
  286. {
  287. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  288. if (!connection)
  289. return;
  290. if (connection->req_next == NULL)
  291. connection->req_next = req;
  292. }
  293. static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
  294. {
  295. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  296. if (!connection)
  297. return;
  298. if (connection->req_next != req)
  299. return;
  300. list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
  301. const unsigned s = req->rq_state;
  302. if (s & RQ_NET_QUEUED)
  303. break;
  304. }
  305. if (&req->tl_requests == &connection->transfer_log)
  306. req = NULL;
  307. connection->req_next = req;
  308. }
  309. static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
  310. {
  311. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  312. if (!connection)
  313. return;
  314. if (connection->req_ack_pending == NULL)
  315. connection->req_ack_pending = req;
  316. }
  317. static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
  318. {
  319. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  320. if (!connection)
  321. return;
  322. if (connection->req_ack_pending != req)
  323. return;
  324. list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
  325. const unsigned s = req->rq_state;
  326. if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
  327. break;
  328. }
  329. if (&req->tl_requests == &connection->transfer_log)
  330. req = NULL;
  331. connection->req_ack_pending = req;
  332. }
  333. static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
  334. {
  335. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  336. if (!connection)
  337. return;
  338. if (connection->req_not_net_done == NULL)
  339. connection->req_not_net_done = req;
  340. }
  341. static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
  342. {
  343. struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
  344. if (!connection)
  345. return;
  346. if (connection->req_not_net_done != req)
  347. return;
  348. list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
  349. const unsigned s = req->rq_state;
  350. if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
  351. break;
  352. }
  353. if (&req->tl_requests == &connection->transfer_log)
  354. req = NULL;
  355. connection->req_not_net_done = req;
  356. }
  357. /* I'd like this to be the only place that manipulates
  358. * req->completion_ref and req->kref. */
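/* Reference counting recap: req->completion_ref counts reasons why the master
 * bio cannot be completed yet (local I/O pending, network send queued or
 * pending, completion suspended), while req->kref counts reasons why the
 * request object itself must stay allocated (e.g. an expected barrier ack, or
 * an aborted local I/O that may still complete later). mod_rq_state() below
 * maps rq_state transitions onto gets and puts of both counters. */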
  359. static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
  360. int clear, int set)
  361. {
  362. struct drbd_device *device = req->device;
  363. struct drbd_peer_device *peer_device = first_peer_device(device);
  364. unsigned s = req->rq_state;
  365. int c_put = 0;
  366. int k_put = 0;
  367. if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
  368. set |= RQ_COMPLETION_SUSP;
  369. /* apply */
  370. req->rq_state &= ~clear;
  371. req->rq_state |= set;
  372. /* no change? */
  373. if (req->rq_state == s)
  374. return;
  375. /* intent: get references */
  376. if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
  377. atomic_inc(&req->completion_ref);
  378. if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
  379. inc_ap_pending(device);
  380. atomic_inc(&req->completion_ref);
  381. }
  382. if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
  383. atomic_inc(&req->completion_ref);
  384. set_if_null_req_next(peer_device, req);
  385. }
  386. if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
  387. kref_get(&req->kref); /* wait for the DONE */
  388. if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
  389. /* potentially already completed in the ack_receiver thread */
  390. if (!(s & RQ_NET_DONE)) {
  391. atomic_add(req->i.size >> 9, &device->ap_in_flight);
  392. set_if_null_req_not_net_done(peer_device, req);
  393. }
  394. if (req->rq_state & RQ_NET_PENDING)
  395. set_if_null_req_ack_pending(peer_device, req);
  396. }
  397. if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
  398. atomic_inc(&req->completion_ref);
  399. /* progress: put references */
  400. if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
  401. ++c_put;
  402. if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
  403. D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
  404. /* local completion may still come in later,
  405. * we need to keep the req object around. */
  406. kref_get(&req->kref);
  407. ++c_put;
  408. }
  409. if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
  410. if (req->rq_state & RQ_LOCAL_ABORTED)
  411. ++k_put;
  412. else
  413. ++c_put;
  414. list_del_init(&req->req_pending_local);
  415. }
  416. if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
  417. dec_ap_pending(device);
  418. ++c_put;
  419. req->acked_jif = jiffies;
  420. advance_conn_req_ack_pending(peer_device, req);
  421. }
  422. if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
  423. ++c_put;
  424. advance_conn_req_next(peer_device, req);
  425. }
  426. if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
  427. if (s & RQ_NET_SENT)
  428. atomic_sub(req->i.size >> 9, &device->ap_in_flight);
  429. if (s & RQ_EXP_BARR_ACK)
  430. ++k_put;
  431. req->net_done_jif = jiffies;
  432. /* in ahead/behind mode, or just in case,
  433. * before we finally destroy this request,
  434. * the caching pointers must not reference it anymore */
  435. advance_conn_req_next(peer_device, req);
  436. advance_conn_req_ack_pending(peer_device, req);
  437. advance_conn_req_not_net_done(peer_device, req);
  438. }
  439. /* potentially complete and destroy */
  440. if (k_put || c_put) {
  441. /* Completion does its own kref_put. If we are going to
  442. * kref_sub below, we need req to be still around then. */
  443. int at_least = k_put + !!c_put;
  444. int refcount = atomic_read(&req->kref.refcount);
  445. if (refcount < at_least)
  446. drbd_err(device,
  447. "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
  448. s, req->rq_state, refcount, at_least);
  449. }
  450. /* If we made progress, retry conflicting peer requests, if any. */
  451. if (req->i.waiting)
  452. wake_up(&device->misc_wait);
  453. if (c_put)
  454. k_put += drbd_req_put_completion_ref(req, m, c_put);
  455. if (k_put)
  456. kref_sub(&req->kref, k_put, drbd_req_destroy);
  457. }
  458. static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
  459. {
  460. char b[BDEVNAME_SIZE];
  461. if (!__ratelimit(&drbd_ratelimit_state))
  462. return;
  463. drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
  464. (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
  465. (unsigned long long)req->i.sector,
  466. req->i.size >> 9,
  467. bdevname(device->ldev->backing_bdev, b));
  468. }
  469. /* Helper for HANDED_OVER_TO_NETWORK.
  470. * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
  471. * Is it also still "PENDING"?
  472. * --> If so, clear PENDING and set NET_OK below.
  473. * If it is a protocol A write, but not RQ_NET_PENDING anymore, neg-ack was faster
  474. * (and we must not set RQ_NET_OK) */
  475. static inline bool is_pending_write_protocol_A(struct drbd_request *req)
  476. {
  477. return (req->rq_state &
  478. (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
  479. == (RQ_WRITE|RQ_NET_PENDING);
  480. }
  481. /* obviously this could be coded as many single functions
  482. * instead of one huge switch,
  483. * or by putting the code directly in the respective locations
  484. * (as it has been before).
  485. *
  486. * but having it this way
  487. * enforces that it is all in this one place, where it is easier to audit,
  488. * it makes it obvious that whatever "event" "happens" to a request should
  489. * happen "atomically" within the req_lock,
  490. * and it enforces that we have to think in a very structured manner
  491. * about the "events" that may happen to a request during its life time ...
  492. */
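/* Note: __req_mod() is invoked with resource->req_lock held (see the callers
 * in this file); any master bio that becomes completable is handed back via *m
 * and completed by the caller through complete_master_bio() only after the
 * lock has been dropped. */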
  493. int __req_mod(struct drbd_request *req, enum drbd_req_event what,
  494. struct bio_and_error *m)
  495. {
  496. struct drbd_device *const device = req->device;
  497. struct drbd_peer_device *const peer_device = first_peer_device(device);
  498. struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
  499. struct net_conf *nc;
  500. int p, rv = 0;
  501. if (m)
  502. m->bio = NULL;
  503. switch (what) {
  504. default:
  505. drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
  506. break;
  507. /* does not happen...
  508. * initialization done in drbd_req_new
  509. case CREATED:
  510. break;
  511. */
  512. case TO_BE_SENT: /* via network */
  513. /* reached via __drbd_make_request
  514. * and from w_read_retry_remote */
  515. D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
  516. rcu_read_lock();
  517. nc = rcu_dereference(connection->net_conf);
  518. p = nc->wire_protocol;
  519. rcu_read_unlock();
  520. req->rq_state |=
  521. p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
  522. p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
  523. mod_rq_state(req, m, 0, RQ_NET_PENDING);
  524. break;
  525. case TO_BE_SUBMITTED: /* locally */
  526. /* reached via __drbd_make_request */
  527. D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
  528. mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
  529. break;
  530. case COMPLETED_OK:
  531. if (req->rq_state & RQ_WRITE)
  532. device->writ_cnt += req->i.size >> 9;
  533. else
  534. device->read_cnt += req->i.size >> 9;
  535. mod_rq_state(req, m, RQ_LOCAL_PENDING,
  536. RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
  537. break;
  538. case ABORT_DISK_IO:
  539. mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
  540. break;
  541. case WRITE_COMPLETED_WITH_ERROR:
  542. drbd_report_io_error(device, req);
  543. __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
  544. mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
  545. break;
  546. case READ_COMPLETED_WITH_ERROR:
  547. drbd_set_out_of_sync(device, req->i.sector, req->i.size);
  548. drbd_report_io_error(device, req);
  549. __drbd_chk_io_error(device, DRBD_READ_ERROR);
  550. /* fall through. */
  551. case READ_AHEAD_COMPLETED_WITH_ERROR:
  552. /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
  553. mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
  554. break;
  555. case DISCARD_COMPLETED_NOTSUPP:
  556. case DISCARD_COMPLETED_WITH_ERROR:
  557. /* I'd rather not detach from local disk just because it
  558. * failed a REQ_DISCARD. */
  559. mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
  560. break;
  561. case QUEUE_FOR_NET_READ:
  562. /* READ, and
  563. * no local disk,
  564. * or target area marked as invalid,
  565. * or just got an io-error. */
  566. /* from __drbd_make_request
  567. * or from bio_endio during read io-error recovery */
  568. /* So we can verify the handle in the answer packet.
  569. * Corresponding drbd_remove_request_interval is in
  570. * drbd_req_complete() */
  571. D_ASSERT(device, drbd_interval_empty(&req->i));
  572. drbd_insert_interval(&device->read_requests, &req->i);
  573. set_bit(UNPLUG_REMOTE, &device->flags);
  574. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  575. D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
  576. mod_rq_state(req, m, 0, RQ_NET_QUEUED);
  577. req->w.cb = w_send_read_req;
  578. drbd_queue_work(&connection->sender_work,
  579. &req->w);
  580. break;
  581. case QUEUE_FOR_NET_WRITE:
  582. /* assert something? */
  583. /* from __drbd_make_request only */
  584. /* Corresponding drbd_remove_request_interval is in
  585. * drbd_req_complete() */
  586. D_ASSERT(device, drbd_interval_empty(&req->i));
  587. drbd_insert_interval(&device->write_requests, &req->i);
  588. /* NOTE
  589. * In case the req ended up on the transfer log before being
  590. * queued on the worker, it could lead to this request being
  591. * missed during cleanup after connection loss.
  592. * So we have to do both operations here,
  593. * within the same lock that protects the transfer log.
  594. *
  595. * _req_add_to_epoch(req); this has to be after the
  596. * _maybe_start_new_epoch(req); which happened in
  597. * __drbd_make_request, because we now may set the bit
  598. * again ourselves to close the current epoch.
  599. *
  600. * Add req to the (now) current epoch (barrier). */
  601. /* otherwise we may lose an unplug, which may cause some remote
  602. * io-scheduler timeout to expire, increasing maximum latency,
  603. * hurting performance. */
  604. set_bit(UNPLUG_REMOTE, &device->flags);
  605. /* queue work item to send data */
  606. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  607. mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
  608. req->w.cb = w_send_dblock;
  609. drbd_queue_work(&connection->sender_work,
  610. &req->w);
  611. /* close the epoch, in case it outgrew the limit */
  612. rcu_read_lock();
  613. nc = rcu_dereference(connection->net_conf);
  614. p = nc->max_epoch_size;
  615. rcu_read_unlock();
  616. if (connection->current_tle_writes >= p)
  617. start_new_tl_epoch(connection);
  618. break;
  619. case QUEUE_FOR_SEND_OOS:
  620. mod_rq_state(req, m, 0, RQ_NET_QUEUED);
  621. req->w.cb = w_send_out_of_sync;
  622. drbd_queue_work(&connection->sender_work,
  623. &req->w);
  624. break;
  625. case READ_RETRY_REMOTE_CANCELED:
  626. case SEND_CANCELED:
  627. case SEND_FAILED:
  628. /* real cleanup will be done from tl_clear. just update flags
  629. * so it is no longer marked as on the worker queue */
  630. mod_rq_state(req, m, RQ_NET_QUEUED, 0);
  631. break;
  632. case HANDED_OVER_TO_NETWORK:
  633. /* assert something? */
  634. if (is_pending_write_protocol_A(req))
  635. /* this is what is dangerous about protocol A:
  636. * pretend it was successfully written on the peer. */
  637. mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
  638. RQ_NET_SENT|RQ_NET_OK);
  639. else
  640. mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
  641. /* It is still not yet RQ_NET_DONE until the
  642. * corresponding epoch barrier got acked as well,
  643. * so we know what to dirty on connection loss. */
  644. break;
  645. case OOS_HANDED_TO_NETWORK:
  646. /* Was not set PENDING, no longer QUEUED, so is now DONE
  647. * as far as this connection is concerned. */
  648. mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
  649. break;
  650. case CONNECTION_LOST_WHILE_PENDING:
  651. /* transfer log cleanup after connection loss */
  652. mod_rq_state(req, m,
  653. RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
  654. RQ_NET_DONE);
  655. break;
  656. case CONFLICT_RESOLVED:
  657. /* for superseded conflicting writes of multiple primaries,
  658. * there is no need to keep anything in the tl, potential
  659. * node crashes are covered by the activity log.
  660. *
  661. * If this request had been marked as RQ_POSTPONED before,
  662. * it will actually not be completed, but "restarted",
  663. * resubmitted from the retry worker context. */
  664. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  665. D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
  666. mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
  667. break;
  668. case WRITE_ACKED_BY_PEER_AND_SIS:
  669. req->rq_state |= RQ_NET_SIS;
  670. case WRITE_ACKED_BY_PEER:
  671. /* Normal operation protocol C: successfully written on peer.
  672. * During resync, even in protocol != C,
  673. * we requested an explicit write ack anyways.
  674. * Which means we cannot even assert anything here.
  675. * Nothing more to do here.
  676. * We want to keep the tl in place for all protocols, to cater
  677. * for volatile write-back caches on lower level devices. */
  678. goto ack_common;
  679. case RECV_ACKED_BY_PEER:
  680. D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
  681. /* protocol B; pretends to be successfully written on peer.
  682. * see also notes above in HANDED_OVER_TO_NETWORK about
  683. * protocol != C */
  684. ack_common:
  685. mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
  686. break;
  687. case POSTPONE_WRITE:
  688. D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
  689. /* If this node has already detected the write conflict, the
  690. * worker will be waiting on misc_wait. Wake it up once this
  691. * request has completed locally.
  692. */
  693. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  694. req->rq_state |= RQ_POSTPONED;
  695. if (req->i.waiting)
  696. wake_up(&device->misc_wait);
  697. /* Do not clear RQ_NET_PENDING. This request will make further
  698. * progress via restart_conflicting_writes() or
  699. * fail_postponed_requests(). Hopefully. */
  700. break;
  701. case NEG_ACKED:
  702. mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
  703. break;
  704. case FAIL_FROZEN_DISK_IO:
  705. if (!(req->rq_state & RQ_LOCAL_COMPLETED))
  706. break;
  707. mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
  708. break;
  709. case RESTART_FROZEN_DISK_IO:
  710. if (!(req->rq_state & RQ_LOCAL_COMPLETED))
  711. break;
  712. mod_rq_state(req, m,
  713. RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
  714. RQ_LOCAL_PENDING);
  715. rv = MR_READ;
  716. if (bio_data_dir(req->master_bio) == WRITE)
  717. rv = MR_WRITE;
  718. get_ldev(device); /* always succeeds in this call path */
  719. req->w.cb = w_restart_disk_io;
  720. drbd_queue_work(&connection->sender_work,
  721. &req->w);
  722. break;
  723. case RESEND:
  724. /* Simply complete (local only) READs. */
  725. if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
  726. mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
  727. break;
  728. }
  729. /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
  730. before the connection loss (B&C only); only P_BARRIER_ACK
  731. (or the local completion?) was missing when we suspended.
  732. Throwing them out of the TL here by pretending we got a BARRIER_ACK.
  733. During connection handshake, we ensure that the peer was not rebooted. */
  734. if (!(req->rq_state & RQ_NET_OK)) {
  735. /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
  736. * in that case we must not set RQ_NET_PENDING. */
  737. mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
  738. if (req->w.cb) {
  739. /* w.cb expected to be w_send_dblock, or w_send_read_req */
  740. drbd_queue_work(&connection->sender_work,
  741. &req->w);
  742. rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
  743. } /* else: FIXME can this happen? */
  744. break;
  745. }
  746. /* else, fall through to BARRIER_ACKED */
  747. case BARRIER_ACKED:
  748. /* barrier ack for READ requests does not make sense */
  749. if (!(req->rq_state & RQ_WRITE))
  750. break;
  751. if (req->rq_state & RQ_NET_PENDING) {
  752. /* barrier came in before all requests were acked.
  753. * this is bad, because if the connection is lost now,
  754. * we won't be able to clean them up... */
  755. drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
  756. }
  757. /* Allowed to complete requests, even while suspended.
  758. * As this is called for all requests within a matching epoch,
  759. * we need to filter, and only set RQ_NET_DONE for those that
  760. * have actually been on the wire. */
  761. mod_rq_state(req, m, RQ_COMPLETION_SUSP,
  762. (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
  763. break;
  764. case DATA_RECEIVED:
  765. D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
  766. mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
  767. break;
  768. case QUEUE_AS_DRBD_BARRIER:
  769. start_new_tl_epoch(connection);
  770. mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
  771. break;
  772. };
  773. return rv;
  774. }
  775. /* we may do a local read if:
  776. * - we are consistent (of course),
  777. * - or we are generally inconsistent,
  778. * BUT we are still/already IN SYNC for this area.
  779. * since size may be bigger than BM_BLOCK_SIZE,
  780. * we may need to check several bits.
  781. */
  782. static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
  783. {
  784. unsigned long sbnr, ebnr;
  785. sector_t esector, nr_sectors;
  786. if (device->state.disk == D_UP_TO_DATE)
  787. return true;
  788. if (device->state.disk != D_INCONSISTENT)
  789. return false;
  790. esector = sector + (size >> 9) - 1;
  791. nr_sectors = drbd_get_capacity(device->this_bdev);
  792. D_ASSERT(device, sector < nr_sectors);
  793. D_ASSERT(device, esector < nr_sectors);
  794. sbnr = BM_SECT_TO_BIT(sector);
  795. ebnr = BM_SECT_TO_BIT(esector);
  796. return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
  797. }
  798. static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
  799. enum drbd_read_balancing rbm)
  800. {
  801. struct backing_dev_info *bdi;
  802. int stripe_shift;
  803. switch (rbm) {
  804. case RB_CONGESTED_REMOTE:
  805. bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
  806. return bdi_read_congested(bdi);
  807. case RB_LEAST_PENDING:
  808. return atomic_read(&device->local_cnt) >
  809. atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
  810. case RB_32K_STRIPING: /* stripe_shift = 15 */
  811. case RB_64K_STRIPING:
  812. case RB_128K_STRIPING:
  813. case RB_256K_STRIPING:
  814. case RB_512K_STRIPING:
  815. case RB_1M_STRIPING: /* stripe_shift = 20 */
  816. stripe_shift = (rbm - RB_32K_STRIPING + 15);
  817. return (sector >> (stripe_shift - 9)) & 1;
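/* For the striping modes above: e.g. RB_32K_STRIPING gives stripe_shift = 15,
 * so (sector >> 6) & 1 alternates between remote and local every 64 sectors
 * (32 KiB); RB_1M_STRIPING gives stripe_shift = 20, alternating every
 * 2048 sectors (1 MiB). */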
  818. case RB_ROUND_ROBIN:
  819. return test_and_change_bit(READ_BALANCE_RR, &device->flags);
  820. case RB_PREFER_REMOTE:
  821. return true;
  822. case RB_PREFER_LOCAL:
  823. default:
  824. return false;
  825. }
  826. }
  827. /*
  828. * complete_conflicting_writes - wait for any conflicting write requests
  829. *
  830. * The write_requests tree contains all active write requests which we
  831. * currently know about. Wait for any requests to complete which conflict with
  832. * the new one.
  833. *
  834. * Only way out: remove the conflicting intervals from the tree.
  835. */
  836. static void complete_conflicting_writes(struct drbd_request *req)
  837. {
  838. DEFINE_WAIT(wait);
  839. struct drbd_device *device = req->device;
  840. struct drbd_interval *i;
  841. sector_t sector = req->i.sector;
  842. int size = req->i.size;
  843. for (;;) {
  844. drbd_for_each_overlap(i, &device->write_requests, sector, size) {
  845. /* Ignore, if already completed to upper layers. */
  846. if (i->completed)
  847. continue;
  848. /* Handle the first found overlap. After the schedule
  849. * we have to restart the tree walk. */
  850. break;
  851. }
  852. if (!i) /* if any */
  853. break;
  854. /* Indicate to wake up device->misc_wait on progress. */
  855. prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
  856. i->waiting = true;
  857. spin_unlock_irq(&device->resource->req_lock);
  858. schedule();
  859. spin_lock_irq(&device->resource->req_lock);
  860. }
  861. finish_wait(&device->misc_wait, &wait);
  862. }
  863. /* called within req_lock */
  864. static void maybe_pull_ahead(struct drbd_device *device)
  865. {
  866. struct drbd_connection *connection = first_peer_device(device)->connection;
  867. struct net_conf *nc;
  868. bool congested = false;
  869. enum drbd_on_congestion on_congestion;
  870. rcu_read_lock();
  871. nc = rcu_dereference(connection->net_conf);
  872. on_congestion = nc ? nc->on_congestion : OC_BLOCK;
  873. rcu_read_unlock();
  874. if (on_congestion == OC_BLOCK ||
  875. connection->agreed_pro_version < 96)
  876. return;
  877. if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
  878. return; /* nothing to do ... */
  879. /* If I don't even have good local storage, we can not reasonably try
  880. * to pull ahead of the peer. We also need the local reference to make
  881. * sure device->act_log is there.
  882. */
  883. if (!get_ldev_if_state(device, D_UP_TO_DATE))
  884. return;
  885. if (nc->cong_fill &&
  886. atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
  887. drbd_info(device, "Congestion-fill threshold reached\n");
  888. congested = true;
  889. }
  890. if (device->act_log->used >= nc->cong_extents) {
  891. drbd_info(device, "Congestion-extents threshold reached\n");
  892. congested = true;
  893. }
  894. if (congested) {
  895. /* start a new epoch for non-mirrored writes */
  896. start_new_tl_epoch(first_peer_device(device)->connection);
  897. if (on_congestion == OC_PULL_AHEAD)
  898. _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
  899. else /*nc->on_congestion == OC_DISCONNECT */
  900. _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
  901. }
  902. put_ldev(device);
  903. }
  904. /* If this returns false, and req->private_bio is still set,
  905. * this should be submitted locally.
  906. *
  907. * If it returns false, but req->private_bio is not set,
  908. * we do not have access to good data :(
  909. *
  910. * Otherwise, this destroys req->private_bio, if any,
  911. * and returns true.
  912. */
  913. static bool do_remote_read(struct drbd_request *req)
  914. {
  915. struct drbd_device *device = req->device;
  916. enum drbd_read_balancing rbm;
  917. if (req->private_bio) {
  918. if (!drbd_may_do_local_read(device,
  919. req->i.sector, req->i.size)) {
  920. bio_put(req->private_bio);
  921. req->private_bio = NULL;
  922. put_ldev(device);
  923. }
  924. }
  925. if (device->state.pdsk != D_UP_TO_DATE)
  926. return false;
  927. if (req->private_bio == NULL)
  928. return true;
  929. /* TODO: improve read balancing decisions, take into account drbd
  930. * protocol, pending requests etc. */
  931. rcu_read_lock();
  932. rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
  933. rcu_read_unlock();
  934. if (rbm == RB_PREFER_LOCAL && req->private_bio)
  935. return false; /* submit locally */
  936. if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
  937. if (req->private_bio) {
  938. bio_put(req->private_bio);
  939. req->private_bio = NULL;
  940. put_ldev(device);
  941. }
  942. return true;
  943. }
  944. return false;
  945. }
  946. bool drbd_should_do_remote(union drbd_dev_state s)
  947. {
  948. return s.pdsk == D_UP_TO_DATE ||
  949. (s.pdsk >= D_INCONSISTENT &&
  950. s.conn >= C_WF_BITMAP_T &&
  951. s.conn < C_AHEAD);
  952. /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
  953. That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
  954. states. */
  955. }
  956. static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
  957. {
  958. return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
  959. /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
  960. since we enter state C_AHEAD only if proto >= 96 */
  961. }
  962. /* returns number of connections (== 1, for drbd 8.4)
  963. * expected to actually write this data,
  964. * which does NOT include those that we are L_AHEAD for. */
  965. static int drbd_process_write_request(struct drbd_request *req)
  966. {
  967. struct drbd_device *device = req->device;
  968. int remote, send_oos;
  969. remote = drbd_should_do_remote(device->state);
  970. send_oos = drbd_should_send_out_of_sync(device->state);
  971. /* Need to replicate writes. Unless it is an empty flush,
  972. * which is better mapped to a DRBD P_BARRIER packet,
  973. * also for drbd wire protocol compatibility reasons.
  974. * If this was a flush, just start a new epoch.
  975. * Unless the current epoch was empty anyways, or we are not currently
  976. * replicating, in which case there is no point. */
  977. if (unlikely(req->i.size == 0)) {
  978. /* The only size==0 bios we expect are empty flushes. */
  979. D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
  980. if (remote)
  981. _req_mod(req, QUEUE_AS_DRBD_BARRIER);
  982. return remote;
  983. }
  984. if (!remote && !send_oos)
  985. return 0;
  986. D_ASSERT(device, !(remote && send_oos));
  987. if (remote) {
  988. _req_mod(req, TO_BE_SENT);
  989. _req_mod(req, QUEUE_FOR_NET_WRITE);
  990. } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
  991. _req_mod(req, QUEUE_FOR_SEND_OOS);
  992. return remote;
  993. }
  994. static void drbd_process_discard_req(struct drbd_request *req)
  995. {
  996. int err = drbd_issue_discard_or_zero_out(req->device,
  997. req->i.sector, req->i.size >> 9, true);
  998. if (err)
  999. req->private_bio->bi_error = -EIO;
  1000. bio_endio(req->private_bio);
  1001. }
  1002. static void
  1003. drbd_submit_req_private_bio(struct drbd_request *req)
  1004. {
  1005. struct drbd_device *device = req->device;
  1006. struct bio *bio = req->private_bio;
  1007. unsigned int type;
  1008. if (bio_op(bio) != REQ_OP_READ)
  1009. type = DRBD_FAULT_DT_WR;
  1010. else if (bio->bi_opf & REQ_RAHEAD)
  1011. type = DRBD_FAULT_DT_RA;
  1012. else
  1013. type = DRBD_FAULT_DT_RD;
  1014. bio->bi_bdev = device->ldev->backing_bdev;
  1015. /* State may have changed since we grabbed our reference on the
  1016. * ->ldev member. Double check, and short-circuit to endio.
  1017. * In case the last activity log transaction failed to get on
  1018. * stable storage, and this is a WRITE, we may not even submit
  1019. * this bio. */
  1020. if (get_ldev(device)) {
  1021. if (drbd_insert_fault(device, type))
  1022. bio_io_error(bio);
  1023. else if (bio_op(bio) == REQ_OP_DISCARD)
  1024. drbd_process_discard_req(req);
  1025. else
  1026. generic_make_request(bio);
  1027. put_ldev(device);
  1028. } else
  1029. bio_io_error(bio);
  1030. }
  1031. static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
  1032. {
  1033. spin_lock_irq(&device->resource->req_lock);
  1034. list_add_tail(&req->tl_requests, &device->submit.writes);
  1035. list_add_tail(&req->req_pending_master_completion,
  1036. &device->pending_master_completion[1 /* WRITE */]);
  1037. spin_unlock_irq(&device->resource->req_lock);
  1038. queue_work(device->submit.wq, &device->submit.worker);
  1039. /* do_submit() may sleep internally on al_wait, too */
  1040. wake_up(&device->al_wait);
  1041. }
  1042. /* returns the new drbd_request pointer, if the caller is expected to
  1043. * drbd_send_and_submit() it (to save latency), or NULL if we queued the
  1044. * request on the submitter thread.
  1045. * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
  1046. */
  1047. static struct drbd_request *
  1048. drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
  1049. {
  1050. const int rw = bio_data_dir(bio);
  1051. struct drbd_request *req;
  1052. /* allocate outside of all locks; */
  1053. req = drbd_req_new(device, bio);
  1054. if (!req) {
  1055. dec_ap_bio(device);
  1056. /* only pass the error to the upper layers.
  1057. * if user cannot handle io errors, that's not our business. */
  1058. drbd_err(device, "could not kmalloc() req\n");
  1059. bio->bi_error = -ENOMEM;
  1060. bio_endio(bio);
  1061. return ERR_PTR(-ENOMEM);
  1062. }
  1063. req->start_jif = start_jif;
  1064. if (!get_ldev(device)) {
  1065. bio_put(req->private_bio);
  1066. req->private_bio = NULL;
  1067. }
  1068. /* Update disk stats */
  1069. _drbd_start_io_acct(device, req);
  1070. /* process discards always from our submitter thread */
  1071. if (bio_op(bio) & REQ_OP_DISCARD)
  1072. goto queue_for_submitter_thread;
  1073. if (rw == WRITE && req->private_bio && req->i.size
  1074. && !test_bit(AL_SUSPENDED, &device->flags)) {
  1075. if (!drbd_al_begin_io_fastpath(device, &req->i))
  1076. goto queue_for_submitter_thread;
  1077. req->rq_state |= RQ_IN_ACT_LOG;
  1078. req->in_actlog_jif = jiffies;
  1079. }
  1080. return req;
  1081. queue_for_submitter_thread:
  1082. atomic_inc(&device->ap_actlog_cnt);
  1083. drbd_queue_write(device, req);
  1084. return NULL;
  1085. }
  1086. /* Require at least one path to current data.
  1087. * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
  1088. * We would not allow to read what was written,
  1089. * we would not have bumped the data generation uuids,
  1090. * we would cause data divergence for all the wrong reasons.
  1091. *
  1092. * If we don't see at least one D_UP_TO_DATE, we will fail this request,
  1093. * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
  1094. * and queues for retry later.
  1095. */
  1096. static bool may_do_writes(struct drbd_device *device)
  1097. {
  1098. const union drbd_dev_state s = device->state;
  1099. return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
  1100. }
  1101. static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
  1102. {
  1103. struct drbd_resource *resource = device->resource;
  1104. const int rw = bio_data_dir(req->master_bio);
  1105. struct bio_and_error m = { NULL, };
  1106. bool no_remote = false;
  1107. bool submit_private_bio = false;
  1108. spin_lock_irq(&resource->req_lock);
  1109. if (rw == WRITE) {
  1110. /* This may temporarily give up the req_lock,
  1111. * but will re-acquire it before it returns here.
  1112. * Needs to be before the check on drbd_suspended() */
  1113. complete_conflicting_writes(req);
  1114. /* no more giving up req_lock from now on! */
  1115. /* check for congestion, and potentially stop sending
  1116. * full data updates, but start sending "dirty bits" only. */
  1117. maybe_pull_ahead(device);
  1118. }
  1119. if (drbd_suspended(device)) {
  1120. /* push back and retry: */
  1121. req->rq_state |= RQ_POSTPONED;
  1122. if (req->private_bio) {
  1123. bio_put(req->private_bio);
  1124. req->private_bio = NULL;
  1125. put_ldev(device);
  1126. }
  1127. goto out;
  1128. }
  1129. /* We fail READ early, if we can not serve it.
  1130. * We must do this before req is registered on any lists.
  1131. * Otherwise, drbd_req_complete() will queue failed READ for retry. */
  1132. if (rw != WRITE) {
  1133. if (!do_remote_read(req) && !req->private_bio)
  1134. goto nodata;
  1135. }
  1136. /* which transfer log epoch does this belong to? */
  1137. req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
  1138. /* no point in adding empty flushes to the transfer log,
  1139. * they are mapped to drbd barriers already. */
  1140. if (likely(req->i.size!=0)) {
  1141. if (rw == WRITE)
  1142. first_peer_device(device)->connection->current_tle_writes++;
  1143. list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
  1144. }
  1145. if (rw == WRITE) {
  1146. if (req->private_bio && !may_do_writes(device)) {
  1147. bio_put(req->private_bio);
  1148. req->private_bio = NULL;
  1149. put_ldev(device);
  1150. goto nodata;
  1151. }
  1152. if (!drbd_process_write_request(req))
  1153. no_remote = true;
  1154. } else {
  1155. /* We either have a private_bio, or we can read from remote.
  1156. * Otherwise we would have taken the goto nodata above. */
  1157. if (req->private_bio == NULL) {
  1158. _req_mod(req, TO_BE_SENT);
  1159. _req_mod(req, QUEUE_FOR_NET_READ);
  1160. } else
  1161. no_remote = true;
  1162. }
  1163. /* If it took the fast path in drbd_request_prepare, add it here.
  1164. * The slow path has added it already. */
  1165. if (list_empty(&req->req_pending_master_completion))
  1166. list_add_tail(&req->req_pending_master_completion,
  1167. &device->pending_master_completion[rw == WRITE]);
  1168. if (req->private_bio) {
  1169. /* needs to be marked within the same spinlock */
  1170. req->pre_submit_jif = jiffies;
  1171. list_add_tail(&req->req_pending_local,
  1172. &device->pending_completion[rw == WRITE]);
  1173. _req_mod(req, TO_BE_SUBMITTED);
  1174. /* but we need to give up the spinlock to submit */
  1175. submit_private_bio = true;
  1176. } else if (no_remote) {
  1177. nodata:
  1178. if (__ratelimit(&drbd_ratelimit_state))
  1179. drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
  1180. (unsigned long long)req->i.sector, req->i.size >> 9);
  1181. /* A write may have been queued for send_oos, however.
  1182. * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
  1183. }
  1184. out:
  1185. if (drbd_req_put_completion_ref(req, &m, 1))
  1186. kref_put(&req->kref, drbd_req_destroy);
  1187. spin_unlock_irq(&resource->req_lock);
  1188. /* Even though above is a kref_put(), this is safe.
  1189. * As long as we still need to submit our private bio,
  1190. * we hold a completion ref, and the request cannot disappear.
  1191. * If however this request did not even have a private bio to submit
  1192. * (e.g. remote read), req may already be invalid now.
  1193. * That's why we cannot check on req->private_bio. */
  1194. if (submit_private_bio)
  1195. drbd_submit_req_private_bio(req);
  1196. if (m.bio)
  1197. complete_master_bio(device, &m);
  1198. }
  1199. void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
  1200. {
  1201. struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
  1202. if (IS_ERR_OR_NULL(req))
  1203. return;
  1204. drbd_send_and_submit(device, req);
  1205. }
static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
        struct drbd_request *req, *tmp;

        list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
                const int rw = bio_data_dir(req->master_bio);

                if (rw == WRITE /* rw != WRITE should not even end up here! */
                    && req->private_bio && req->i.size
                    && !test_bit(AL_SUSPENDED, &device->flags)) {
                        if (!drbd_al_begin_io_fastpath(device, &req->i))
                                continue;

                        req->rq_state |= RQ_IN_ACT_LOG;
                        req->in_actlog_jif = jiffies;
                        atomic_dec(&device->ap_actlog_cnt);
                }

                list_del_init(&req->tl_requests);
                drbd_send_and_submit(device, req);
        }
}
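
/* Try to reserve activity log slots for the incoming requests without
 * blocking.  Requests that got their slots move to "pending", requests
 * that hit a busy extent or otherwise failed move to "later"; -ENOBUFS
 * means the transaction is full, so stop early.  Returns true if
 * anything became pending. */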
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
                                            struct list_head *incoming,
                                            struct list_head *pending,
                                            struct list_head *later)
{
        struct drbd_request *req, *tmp;
        int wake = 0;
        int err;

        spin_lock_irq(&device->al_lock);
        list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
                err = drbd_al_begin_io_nonblock(device, &req->i);
                if (err == -ENOBUFS)
                        break;
                if (err == -EBUSY)
                        wake = 1;
                if (err)
                        list_move_tail(&req->tl_requests, later);
                else
                        list_move_tail(&req->tl_requests, pending);
        }
        spin_unlock_irq(&device->al_lock);
        if (wake)
                wake_up(&device->al_wait);

        return !list_empty(pending);
}
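
/* Mark each pending request as covered by the activity log, then send
 * and submit it. */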
void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
        struct drbd_request *req, *tmp;

        list_for_each_entry_safe(req, tmp, pending, tl_requests) {
                req->rq_state |= RQ_IN_ACT_LOG;
                req->in_actlog_jif = jiffies;
                atomic_dec(&device->ap_actlog_cnt);
                list_del_init(&req->tl_requests);
                drbd_send_and_submit(device, req);
        }
}
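
/* Work function for device->submit.worker:  drain device->submit.writes,
 * submit whatever can take the activity log fast path right away, and
 * batch the rest into AL transactions before submitting. */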
void do_submit(struct work_struct *ws)
{
        struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
        LIST_HEAD(incoming);    /* from drbd_make_request() */
        LIST_HEAD(pending);     /* to be submitted after next AL-transaction commit */
        LIST_HEAD(busy);        /* blocked by resync requests */

        /* grab new incoming requests */
        spin_lock_irq(&device->resource->req_lock);
        list_splice_tail_init(&device->submit.writes, &incoming);
        spin_unlock_irq(&device->resource->req_lock);

        for (;;) {
                DEFINE_WAIT(wait);

                /* move used-to-be-busy back to front of incoming */
                list_splice_init(&busy, &incoming);
                submit_fast_path(device, &incoming);
                if (list_empty(&incoming))
                        break;
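
                /* Slow path:  sleep on al_wait until at least one request
                 * manages to reserve its activity log slots, i.e. until
                 * "pending" is no longer empty. */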
                for (;;) {
                        prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

                        list_splice_init(&busy, &incoming);
                        prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
                        if (!list_empty(&pending))
                                break;

                        schedule();

                        /* If all currently "hot" activity log extents are kept busy by
                         * incoming requests, we still must not totally starve new
                         * requests to "cold" extents.
                         * Something left on &incoming means there had not been
                         * enough update slots available, and the activity log
                         * has been marked as "starving".
                         *
                         * Try again now, without looking for new requests,
                         * effectively blocking all new requests until we made
                         * at least _some_ progress with what we currently have.
                         */
                        if (!list_empty(&incoming))
                                continue;

                        /* Nothing moved to pending, but nothing left
                         * on incoming: all moved to busy!
                         * Grab new and iterate. */
                        spin_lock_irq(&device->resource->req_lock);
                        list_splice_tail_init(&device->submit.writes, &incoming);
                        spin_unlock_irq(&device->resource->req_lock);
                }
                finish_wait(&device->al_wait, &wait);

                /* If the transaction was full, before all incoming requests
                 * had been processed, skip ahead to commit, and iterate
                 * without splicing in more incoming requests from upper layers.
                 *
                 * Else, if all incoming have been processed,
                 * they have become either "pending" (to be submitted after
                 * next transaction commit) or "busy" (blocked by resync).
                 *
                 * Maybe more was queued, while we prepared the transaction?
                 * Try to stuff those into this transaction as well.
                 * Be strictly non-blocking here,
                 * we already have something to commit.
                 *
                 * Commit if we don't make any more progress.
                 */
                while (list_empty(&incoming)) {
                        LIST_HEAD(more_pending);
                        LIST_HEAD(more_incoming);
                        bool made_progress;

                        /* It is ok to look outside the lock,
                         * it's only an optimization anyways */
                        if (list_empty(&device->submit.writes))
                                break;

                        spin_lock_irq(&device->resource->req_lock);
                        list_splice_tail_init(&device->submit.writes, &more_incoming);
                        spin_unlock_irq(&device->resource->req_lock);

                        if (list_empty(&more_incoming))
                                break;

                        made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

                        list_splice_tail_init(&more_pending, &pending);
                        list_splice_tail_init(&more_incoming, &incoming);
                        if (!made_progress)
                                break;
                }

                drbd_al_begin_io_commit(device);
                send_and_submit_pending(device, &pending);
        }
}
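
/* Entry point for bios submitted to a DRBD device:  split the bio if
 * necessary, account for one more application bio in flight, and pass
 * it on. */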
blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
{
        struct drbd_device *device = (struct drbd_device *) q->queuedata;
        unsigned long start_jif;

        blk_queue_split(q, &bio, q->bio_split);

        start_jif = jiffies;

        /*
         * what we "blindly" assume:
         */
        D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

        inc_ap_bio(device);
        __drbd_make_request(device, bio, start_jif);

        return BLK_QC_T_NONE;
}
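
/* Decide whether the oldest network request has exceeded the effective
 * network timeout "ent" (ko-count * timeout).  Requests younger than a
 * recent reconnect are ignored, and requests still waiting for an ACK
 * are treated differently from those only waiting for the epoch closing
 * barrier ack. */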
static bool net_timeout_reached(struct drbd_request *net_req,
                struct drbd_connection *connection,
                unsigned long now, unsigned long ent,
                unsigned int ko_count, unsigned int timeout)
{
        struct drbd_device *device = net_req->device;

        if (!time_after(now, net_req->pre_send_jif + ent))
                return false;

        if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
                return false;

        if (net_req->rq_state & RQ_NET_PENDING) {
                drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
                          jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
                return true;
        }

        /* We received an ACK already (or are using protocol A),
         * but are waiting for the epoch closing barrier ack.
         * Check if we sent the barrier already.  We should not blame the peer
         * for being unresponsive if we did not even ask it yet. */
        if (net_req->epoch == connection->send.current_epoch_nr) {
                drbd_warn(device,
                          "We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
                          jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
                return false;
        }

        /* Worst case: we may have been blocked for whatever reason, then
         * suddenly are able to send a lot of requests (and epoch separating
         * barriers) in quick succession.
         * The timestamp of the net_req may be much too old and not correspond
         * to the sending time of the relevant unack'ed barrier packet, so it
         * would trigger a spurious timeout.  The latest barrier packet may
         * have too recent a timestamp to trigger the timeout, so we could
         * miss a timeout.  Right now we don't have a place to conveniently
         * store these timestamps.
         * But in this particular situation, the application requests are still
         * completed to upper layers, DRBD should still "feel" responsive.
         * No need yet to kill this connection, it may still recover.
         * If not, eventually we will have queued enough into the network for
         * us to block.  From that point of view, the timestamp of the last sent
         * barrier packet is relevant enough.
         */
        if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
                drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
                          connection->send.last_sent_barrier_jif, now,
                          jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
                return true;
        }
        return false;
}

/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   or from the local disk, respectively,
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (or the disk was attached, respectively)
 *   for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
 *
 * Side effect: once per 32bit wrap-around interval, which means every
 * ~198 days with 250 HZ (2^32 jiffies / 250 HZ / 86400 s per day ~= 198.8
 * days), we have a window where the timeout would need to expire twice
 * (worst case) to become effective.  Good enough.
 */
void request_timer_fn(unsigned long data)
{
        struct drbd_device *device = (struct drbd_device *) data;
        struct drbd_connection *connection = first_peer_device(device)->connection;
        struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
        struct net_conf *nc;
        unsigned long oldest_submit_jif;
        unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
        unsigned long now;
        unsigned int ko_count = 0, timeout = 0;

        rcu_read_lock();
        nc = rcu_dereference(connection->net_conf);
        if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
                ko_count = nc->ko_count;
                timeout = nc->timeout;
        }

        if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
                dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
                put_ldev(device);
        }
        rcu_read_unlock();

        ent = timeout * HZ/10 * ko_count;
        et = min_not_zero(dt, ent);

        if (!et)
                return; /* Recurring timer stopped */

        now = jiffies;
        nt = now + et;

        spin_lock_irq(&device->resource->req_lock);
        req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
        req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);

        /* maybe the oldest request waiting for the peer is in fact still
         * blocking in tcp sendmsg.  That's ok, though, that's handled via the
         * socket send timeout, requesting a ping, and bumping ko-count in
         * we_should_drop_the_connection().
         */

        /* check the oldest request we successfully sent,
         * but which is still waiting for an ACK. */
        req_peer = connection->req_ack_pending;

        /* if we don't have such a request (e.g. protocol A)
         * check the oldest request which is still waiting on its epoch
         * closing barrier ack. */
        if (!req_peer)
                req_peer = connection->req_not_net_done;

        /* evaluate the oldest peer request only in one timer! */
        if (req_peer && req_peer->device != device)
                req_peer = NULL;

        /* do we have something to evaluate? */
        if (req_peer == NULL && req_write == NULL && req_read == NULL)
                goto out;
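
        /* Oldest local submit timestamp:  take the older of the two
         * pending-completion list heads; if neither exists, use "now",
         * which makes the disk-timeout check below a no-op. */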
        oldest_submit_jif =
                (req_write && req_read)
                ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
                  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
                : req_write ? req_write->pre_submit_jif
                : req_read ? req_read->pre_submit_jif : now;

        if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
                _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);

        if (dt && oldest_submit_jif != now &&
            time_after(now, oldest_submit_jif + dt) &&
            !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
                drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
                __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
        }

        /* Reschedule timer for the nearest not already expired timeout.
         * Fallback to now + min(effective network timeout, disk timeout). */
        ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
                ? req_peer->pre_send_jif + ent : now + et;
        dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
                ? oldest_submit_jif + dt : now + et;
        nt = time_before(ent, dt) ? ent : dt;
out:
        spin_unlock_irq(&device->resource->req_lock);
        mod_timer(&device->request_timer, nt);
}