checkpoint.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783
  1. /*
  2. * linux/fs/jbd/checkpoint.c
  3. *
  4. * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
  5. *
  6. * Copyright 1999 Red Hat Software --- All Rights Reserved
  7. *
  8. * This file is part of the Linux kernel and is made available under
  9. * the terms of the GNU General Public License, version 2, or at your
  10. * option, any later version, incorporated herein by reference.
  11. *
  12. * Checkpoint routines for the generic filesystem journaling code.
  13. * Part of the ext2fs journaling system.
  14. *
  15. * Checkpointing is the process of ensuring that a section of the log is
  16. * committed fully to disk, so that that portion of the log can be
  17. * reused.
  18. */
  19. #include <linux/time.h>
  20. #include <linux/fs.h>
  21. #include <linux/jbd.h>
  22. #include <linux/errno.h>
  23. #include <linux/slab.h>
  24. #include <linux/blkdev.h>
  25. #include <trace/events/jbd.h>
  26. /*
  27. * Unlink a buffer from a transaction checkpoint list.
  28. *
  29. * Called with j_list_lock held.
  30. */
  31. static inline void __buffer_unlink_first(struct journal_head *jh)
  32. {
  33. transaction_t *transaction = jh->b_cp_transaction;
  34. jh->b_cpnext->b_cpprev = jh->b_cpprev;
  35. jh->b_cpprev->b_cpnext = jh->b_cpnext;
  36. if (transaction->t_checkpoint_list == jh) {
  37. transaction->t_checkpoint_list = jh->b_cpnext;
  38. if (transaction->t_checkpoint_list == jh)
  39. transaction->t_checkpoint_list = NULL;
  40. }
  41. }
/*
 * Unlink a buffer from a transaction checkpoint(io) list.
 *
 * __buffer_unlink_first() performs the actual list splice and repairs
 * t_checkpoint_list; here we additionally repair t_checkpoint_io_list
 * in case the buffer was the head of that list instead.
 *
 * Called with j_list_lock held.
 */
static inline void __buffer_unlink(struct journal_head *jh)
{
	transaction_t *transaction = jh->b_cp_transaction;

	__buffer_unlink_first(jh);
	if (transaction->t_checkpoint_io_list == jh) {
		transaction->t_checkpoint_io_list = jh->b_cpnext;
		/* A single-entry circular list points at itself, so if the
		 * head is still jh the list has become empty. */
		if (transaction->t_checkpoint_io_list == jh)
			transaction->t_checkpoint_io_list = NULL;
	}
}
  57. /*
  58. * Move a buffer from the checkpoint list to the checkpoint io list
  59. *
  60. * Called with j_list_lock held
  61. */
  62. static inline void __buffer_relink_io(struct journal_head *jh)
  63. {
  64. transaction_t *transaction = jh->b_cp_transaction;
  65. __buffer_unlink_first(jh);
  66. if (!transaction->t_checkpoint_io_list) {
  67. jh->b_cpnext = jh->b_cpprev = jh;
  68. } else {
  69. jh->b_cpnext = transaction->t_checkpoint_io_list;
  70. jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
  71. jh->b_cpprev->b_cpnext = jh;
  72. jh->b_cpnext->b_cpprev = jh;
  73. }
  74. transaction->t_checkpoint_io_list = jh;
  75. }
/*
 * Try to release a checkpointed buffer from its transaction.
 * Returns 1 if we released it and 2 if we also released the
 * whole transaction.
 *
 * Requires j_list_lock
 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
 */
static int __try_to_free_cp_buf(struct journal_head *jh)
{
	int ret = 0;
	struct buffer_head *bh = jh2bh(jh);

	/*
	 * Only a buffer that is off all transaction lists, unlocked,
	 * clean and free of write errors may be dropped here.
	 */
	if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
	    !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
		/*
		 * Get our reference so that bh cannot be freed before
		 * we unlock it
		 */
		get_bh(bh);
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		/* __journal_remove_checkpoint() returns 1 if it freed the
		 * whole transaction, hence the +1 giving the 1/2 result. */
		ret = __journal_remove_checkpoint(jh) + 1;
		jbd_unlock_bh_state(bh);
		BUFFER_TRACE(bh, "release");
		__brelse(bh);
	} else {
		jbd_unlock_bh_state(bh);
	}
	return ret;
}
/*
 * __log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*.  It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __log_wait_for_space(journal_t *journal)
{
	int nblocks, space_left;

	assert_spin_locked(&journal->j_state_lock);

	nblocks = jbd_space_needed(journal);
	while (__log_space_left(journal) < nblocks) {
		if (journal->j_flags & JFS_ABORT)
			return;
		/* Checkpointing can sleep, so the state lock must go. */
		spin_unlock(&journal->j_state_lock);
		mutex_lock(&journal->j_checkpoint_mutex);

		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * transactions ready to be checkpointed, try to recover
		 * journal space by calling cleanup_journal_tail(), and if
		 * that doesn't work, by waiting for the currently committing
		 * transaction to complete.  If there is absolutely no way
		 * to make progress, this is either a BUG or corrupted
		 * filesystem, so abort the journal and leave a stack
		 * trace for forensic evidence.
		 */
		spin_lock(&journal->j_state_lock);
		spin_lock(&journal->j_list_lock);
		nblocks = jbd_space_needed(journal);
		space_left = __log_space_left(journal);
		if (space_left < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;
			tid_t tid = 0;

			/* Snapshot the committing tid before dropping locks */
			if (journal->j_committing_transaction)
				tid = journal->j_committing_transaction->t_tid;
			spin_unlock(&journal->j_list_lock);
			spin_unlock(&journal->j_state_lock);
			if (chkpt) {
				log_do_checkpoint(journal);
			} else if (cleanup_journal_tail(journal) == 0) {
				/* We were able to recover space; yay! */
				;
			} else if (tid) {
				/* Wait for the running commit to free space */
				log_wait_commit(journal, tid);
			} else {
				printk(KERN_ERR "%s: needed %d blocks and "
				       "only had %d space available\n",
				       __func__, nblocks, space_left);
				printk(KERN_ERR "%s: no way to get more "
				       "journal space\n", __func__);
				WARN_ON(1);
				journal_abort(journal, 0);
			}
			spin_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
		}
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
}
/*
 * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
 * The caller must restart a list walk.  Wait for someone else to run
 * jbd_unlock_bh_state().
 */
static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
	__releases(journal->j_list_lock)
{
	/* Pin the buffer so it cannot be freed once we drop j_list_lock */
	get_bh(bh);
	spin_unlock(&journal->j_list_lock);
	/* Taking and immediately releasing the state lock just waits for
	 * the current holder to finish with the buffer. */
	jbd_lock_bh_state(bh);
	jbd_unlock_bh_state(bh);
	put_bh(bh);
}
/*
 * Clean up transaction's list of buffers submitted for io.
 * We wait for any pending IO to complete and remove any clean
 * buffers. Note that we take the buffers in the opposite ordering
 * from the one in which they were submitted for IO.
 *
 * Return 0 on success, and return <0 if some buffers have failed
 * to be written out.
 *
 * Called with j_list_lock held.
 */
static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	tid_t this_tid;
	int released = 0;
	int ret = 0;

	this_tid = transaction->t_tid;
restart:
	/* Did somebody clean up the transaction in the meanwhile? */
	if (journal->j_checkpoint_transactions != transaction ||
			transaction->t_tid != this_tid)
		return ret;
	while (!released && transaction->t_checkpoint_io_list) {
		jh = transaction->t_checkpoint_io_list;
		bh = jh2bh(jh);
		if (!jbd_trylock_bh_state(bh)) {
			/* Lock ranking forbids blocking on the state lock
			 * under j_list_lock: drop it, wait, then restart. */
			jbd_sync_bh(journal, bh);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		get_bh(bh);
		if (buffer_locked(bh)) {
			/* IO still in flight: wait for it with no locks held */
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		if (unlikely(buffer_write_io_error(bh)))
			ret = -EIO;

		/*
		 * Now in whatever state the buffer currently is, we know that
		 * it has been written out and so we can drop it from the list
		 */
		released = __journal_remove_checkpoint(jh);
		jbd_unlock_bh_state(bh);
		__brelse(bh);
	}

	return ret;
}
  235. #define NR_BATCH 64
  236. static void
  237. __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
  238. {
  239. int i;
  240. struct blk_plug plug;
  241. blk_start_plug(&plug);
  242. for (i = 0; i < *batch_count; i++)
  243. write_dirty_buffer(bhs[i], WRITE_SYNC);
  244. blk_finish_plug(&plug);
  245. for (i = 0; i < *batch_count; i++) {
  246. struct buffer_head *bh = bhs[i];
  247. clear_buffer_jwrite(bh);
  248. BUFFER_TRACE(bh, "brelse");
  249. __brelse(bh);
  250. }
  251. *batch_count = 0;
  252. }
/*
 * Try to flush one buffer from the checkpoint list to disk.
 *
 * Return 1 if something happened which requires us to abort the current
 * scan of the checkpoint list.  Return <0 if the buffer has failed to
 * be written out.
 *
 * Called with j_list_lock held and drops it if 1 is returned
 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
 */
static int __process_buffer(journal_t *journal, struct journal_head *jh,
			struct buffer_head **bhs, int *batch_count)
{
	struct buffer_head *bh = jh2bh(jh);
	int ret = 0;

	if (buffer_locked(bh)) {
		/* A write is already in flight: just wait for it. */
		get_bh(bh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		wait_on_buffer(bh);
		/* the journal_head may have gone by now */
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
		ret = 1;
	} else if (jh->b_transaction != NULL) {
		transaction_t *t = jh->b_transaction;
		tid_t tid = t->t_tid;

		/*
		 * The buffer still belongs to a live transaction; make
		 * that transaction commit so the buffer reaches disk via
		 * the log instead.
		 */
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		log_start_commit(journal, tid);
		log_wait_commit(journal, tid);
		ret = 1;
	} else if (!buffer_dirty(bh)) {
		/* Already clean: drop it straight off the checkpoint list */
		ret = 1;
		if (unlikely(buffer_write_io_error(bh)))
			ret = -EIO;
		get_bh(bh);
		J_ASSERT_JH(jh, !buffer_jbddirty(bh));
		BUFFER_TRACE(bh, "remove from checkpoint");
		__journal_remove_checkpoint(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		__brelse(bh);
	} else {
		/*
		 * Important: we are about to write the buffer, and
		 * possibly block, while still holding the journal lock.
		 * We cannot afford to let the transaction logic start
		 * messing around with this buffer before we write it to
		 * disk, as that would break recoverability.
		 */
		BUFFER_TRACE(bh, "queue");
		get_bh(bh);
		J_ASSERT_BH(bh, !buffer_jwrite(bh));
		set_buffer_jwrite(bh);
		bhs[*batch_count] = bh;
		/* Move to the io list so __wait_cp_io() finds it later */
		__buffer_relink_io(jh);
		jbd_unlock_bh_state(bh);
		(*batch_count)++;
		if (*batch_count == NR_BATCH) {
			/* Batch is full: submit it now (drops j_list_lock) */
			spin_unlock(&journal->j_list_lock);
			__flush_batch(journal, bhs, batch_count);
			ret = 1;
		}
	}
	return ret;
}
/*
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. We submit larger chunks of data at once.
 *
 * The journal should be locked before calling this function.
 * Called with j_checkpoint_mutex held.
 */
int log_do_checkpoint(journal_t *journal)
{
	transaction_t *transaction;
	tid_t this_tid;
	int result;

	jbd_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = cleanup_journal_tail(journal);
	trace_jbd_checkpoint(journal, result);
	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks.  Take one transaction
	 * and write it.
	 */
	result = 0;
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions)
		goto out;
	transaction = journal->j_checkpoint_transactions;
	this_tid = transaction->t_tid;
restart:
	/*
	 * If someone cleaned up this transaction while we slept, we're
	 * done (maybe it's a new transaction, but it fell at the same
	 * address).
	 */
	if (journal->j_checkpoint_transactions == transaction &&
			transaction->t_tid == this_tid) {
		int batch_count = 0;
		struct buffer_head *bhs[NR_BATCH];
		struct journal_head *jh;
		int retry = 0, err;

		while (!retry && transaction->t_checkpoint_list) {
			struct buffer_head *bh;

			jh = transaction->t_checkpoint_list;
			bh = jh2bh(jh);
			if (!jbd_trylock_bh_state(bh)) {
				/* Lock ranking: wait for the holder and
				 * rescan the list from the top. */
				jbd_sync_bh(journal, bh);
				retry = 1;
				break;
			}
			/* retry > 0 means the scan must restart;
			 * retry < 0 records a buffer write error. */
			retry = __process_buffer(journal, jh, bhs,&batch_count);
			if (retry < 0 && !result)
				result = retry;
			if (!retry && (need_resched() ||
				spin_needbreak(&journal->j_list_lock))) {
				spin_unlock(&journal->j_list_lock);
				retry = 1;
				break;
			}
		}

		if (batch_count) {
			if (!retry) {
				/* __process_buffer() left j_list_lock held;
				 * release it before submitting IO. */
				spin_unlock(&journal->j_list_lock);
				retry = 1;
			}
			__flush_batch(journal, bhs, &batch_count);
		}

		if (retry) {
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		/*
		 * Now we have cleaned up the first transaction's checkpoint
		 * list. Let's clean up the second one
		 */
		err = __wait_cp_io(journal, transaction);
		if (!result)
			result = err;
	}
out:
	spin_unlock(&journal->j_list_lock);
	if (result < 0)
		journal_abort(journal, result);
	else
		result = cleanup_journal_tail(journal);

	return (result < 0) ? result : 0;
}
/*
 * Check the list of checkpoint transactions for the journal to see if
 * we have already got rid of any since the last update of the log tail
 * in the journal superblock.  If so, we can instantly roll the
 * superblock forward to remove those transactions from the log.
 *
 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
 *
 * This is the only part of the journaling code which really needs to be
 * aware of transaction aborts.  Checkpointing involves writing to the
 * main filesystem area rather than to the journal, so it can proceed
 * even in abort state, but we must not update the super block if
 * checkpointing may have failed.  Otherwise, we would lose some metadata
 * buffers which should be written-back to the filesystem.
 */
int cleanup_journal_tail(journal_t *journal)
{
	transaction_t * transaction;
	tid_t		first_tid;
	unsigned int	blocknr, freed;

	if (is_journal_aborted(journal))
		return 1;

	/*
	 * OK, work out the oldest transaction remaining in the log, and
	 * the log block it starts at.
	 *
	 * If the log is now empty, we need to work out which is the
	 * next transaction ID we will write, and where it will
	 * start.
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/* Oldest pinned state, in order: checkpointing transactions,
	 * then the committing one, then the running one, else empty log. */
	transaction = journal->j_checkpoint_transactions;
	if (transaction) {
		first_tid = transaction->t_tid;
		blocknr = transaction->t_log_start;
	} else if ((transaction = journal->j_committing_transaction) != NULL) {
		first_tid = transaction->t_tid;
		blocknr = transaction->t_log_start;
	} else if ((transaction = journal->j_running_transaction) != NULL) {
		first_tid = transaction->t_tid;
		blocknr = journal->j_head;
	} else {
		first_tid = journal->j_transaction_sequence;
		blocknr = journal->j_head;
	}
	spin_unlock(&journal->j_list_lock);
	J_ASSERT(blocknr != 0);

	/* If the oldest pinned transaction is at the tail of the log
	   already then there's not much we can do right now. */
	if (journal->j_tail_sequence == first_tid) {
		spin_unlock(&journal->j_state_lock);
		return 1;
	}
	spin_unlock(&journal->j_state_lock);

	/*
	 * We need to make sure that any blocks that were recently written out
	 * --- perhaps by log_do_checkpoint() --- are flushed out before we
	 * drop the transactions from the journal. Similarly we need to be sure
	 * superblock makes it to disk before next transaction starts reusing
	 * freed space (otherwise we could replay some blocks of the new
	 * transaction thinking they belong to the old one). So we use
	 * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
	 * with an appropriately sized journal, but we need this to guarantee
	 * correctness.  Fortunately cleanup_journal_tail() doesn't get called
	 * all that often.
	 */
	journal_update_sb_log_tail(journal, first_tid, blocknr,
				   WRITE_FLUSH_FUA);

	spin_lock(&journal->j_state_lock);
	/* OK, update the superblock to recover the freed space.
	 * Physical blocks come first: have we wrapped beyond the end of
	 * the log? */
	freed = blocknr - journal->j_tail;
	if (blocknr < journal->j_tail)
		freed = freed + journal->j_last - journal->j_first;

	trace_jbd_cleanup_journal_tail(journal, first_tid, blocknr, freed);
	jbd_debug(1,
		  "Cleaning journal tail from %d to %d (offset %u), "
		  "freeing %u\n",
		  journal->j_tail_sequence, first_tid, blocknr, freed);

	journal->j_free += freed;
	journal->j_tail_sequence = first_tid;
	journal->j_tail = blocknr;
	spin_unlock(&journal->j_state_lock);

	return 0;
}
/* Checkpoint list management */

/*
 * journal_clean_one_cp_list
 *
 * Find all the written-back checkpoint buffers in the given list and release
 * them.
 *
 * Called with j_list_lock held.
 * Returns number of buffers reaped (for debug)
 */
static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
{
	struct journal_head *last_jh;
	struct journal_head *next_jh = jh;
	int ret, freed = 0;

	*released = 0;
	if (!jh)
		return 0;

	/* Remember the last entry so the walk over the circular list
	 * terminates even as entries are removed under us. */
	last_jh = jh->b_cpprev;
	do {
		jh = next_jh;
		next_jh = jh->b_cpnext;
		/* Use trylock because of the ranking */
		if (jbd_trylock_bh_state(jh2bh(jh))) {
			ret = __try_to_free_cp_buf(jh);
			if (ret) {
				freed++;
				if (ret == 2) {
					/* The whole transaction was freed,
					 * so the list no longer exists. */
					*released = 1;
					return freed;
				}
			}
		}
		/*
		 * This function only frees up some memory
		 * if possible so we dont have an obligation
		 * to finish processing. Bail out if preemption
		 * requested:
		 */
		if (need_resched())
			return freed;
	} while (jh != last_jh);

	return freed;
}
/*
 * journal_clean_checkpoint_list
 *
 * Find all the written-back checkpoint buffers in the journal and release them.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 * Returns number of buffers reaped (for debug)
 */
int __journal_clean_checkpoint_list(journal_t *journal)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	int ret = 0;
	int released;

	transaction = journal->j_checkpoint_transactions;
	if (!transaction)
		goto out;

	/* Bound the walk over the circular transaction list, which may
	 * shrink while we reap buffers from it. */
	last_transaction = transaction->t_cpprev;
	next_transaction = transaction;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		ret += journal_clean_one_cp_list(transaction->
				t_checkpoint_list, &released);
		/*
		 * This function only frees up some memory if possible so we
		 * dont have an obligation to finish processing. Bail out if
		 * preemption requested:
		 */
		if (need_resched())
			goto out;
		/* released means the transaction was freed: its io_list is
		 * gone too, so move on to the next transaction. */
		if (released)
			continue;
		/*
		 * It is essential that we are as careful as in the case of
		 * t_checkpoint_list with removing the buffer from the list as
		 * we can possibly see not yet submitted buffers on io_list
		 */
		ret += journal_clean_one_cp_list(transaction->
				t_checkpoint_io_list, &released);
		if (need_resched())
			goto out;
	} while (transaction != last_transaction);
out:
	return ret;
}
/*
 * journal_remove_checkpoint: called after a buffer has been committed
 * to disk (either by being write-back flushed to disk, or being
 * committed to the log).
 *
 * We cannot safely clean a transaction out of the log until all of the
 * buffer updates committed in that transaction have safely been stored
 * elsewhere on disk.  To achieve this, all of the buffers in a
 * transaction need to be maintained on the transaction's checkpoint
 * lists until they have been rewritten, at which point this function is
 * called to remove the buffer from the existing transaction's
 * checkpoint lists.
 *
 * The function returns 1 if it frees the transaction, 0 otherwise.
 * The function can free jh and bh.
 *
 * This function is called with j_list_lock held.
 * This function is called with jbd_lock_bh_state(jh2bh(jh))
 */
int __journal_remove_checkpoint(struct journal_head *jh)
{
	transaction_t *transaction;
	journal_t *journal;
	int ret = 0;

	JBUFFER_TRACE(jh, "entry");

	if ((transaction = jh->b_cp_transaction) == NULL) {
		JBUFFER_TRACE(jh, "not on transaction");
		goto out;
	}
	journal = transaction->t_journal;

	JBUFFER_TRACE(jh, "removing from transaction");
	__buffer_unlink(jh);
	jh->b_cp_transaction = NULL;
	/* Drop the reference taken when the buffer was checkpointed */
	journal_put_journal_head(jh);

	if (transaction->t_checkpoint_list != NULL ||
	    transaction->t_checkpoint_io_list != NULL)
		goto out;

	/*
	 * There is one special case to worry about: if we have just pulled the
	 * buffer off a running or committing transaction's checkpoint list,
	 * then even if the checkpoint list is empty, the transaction obviously
	 * cannot be dropped!
	 *
	 * The locking here around t_state is a bit sleazy.
	 * See the comment at the end of journal_commit_transaction().
	 */
	if (transaction->t_state != T_FINISHED)
		goto out;

	/* OK, that was the last buffer for the transaction: we can now
	   safely remove this transaction from the log */

	__journal_drop_transaction(journal, transaction);

	/* Just in case anybody was waiting for more transactions to be
	   checkpointed... */
	wake_up(&journal->j_wait_logspace);
	ret = 1;
out:
	return ret;
}
  647. /*
  648. * journal_insert_checkpoint: put a committed buffer onto a checkpoint
  649. * list so that we know when it is safe to clean the transaction out of
  650. * the log.
  651. *
  652. * Called with the journal locked.
  653. * Called with j_list_lock held.
  654. */
  655. void __journal_insert_checkpoint(struct journal_head *jh,
  656. transaction_t *transaction)
  657. {
  658. JBUFFER_TRACE(jh, "entry");
  659. J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
  660. J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
  661. /* Get reference for checkpointing transaction */
  662. journal_grab_journal_head(jh2bh(jh));
  663. jh->b_cp_transaction = transaction;
  664. if (!transaction->t_checkpoint_list) {
  665. jh->b_cpnext = jh->b_cpprev = jh;
  666. } else {
  667. jh->b_cpnext = transaction->t_checkpoint_list;
  668. jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
  669. jh->b_cpprev->b_cpnext = jh;
  670. jh->b_cpnext->b_cpprev = jh;
  671. }
  672. transaction->t_checkpoint_list = jh;
  673. }
/*
 * We've finished with this transaction structure: adios...
 *
 * The transaction must have no links except for the checkpoint by this
 * point.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
	assert_spin_locked(&journal->j_list_lock);
	if (transaction->t_cpnext) {
		/* Unlink from the journal's circular list of checkpoint
		 * transactions, repairing the list head if necessary. */
		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions =
				transaction->t_cpnext;
		/* Single-entry circular list: it is now empty. */
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions = NULL;
	}

	/* By this point every buffer list of the transaction must be empty
	 * and the transaction fully committed. */
	J_ASSERT(transaction->t_state == T_FINISHED);
	J_ASSERT(transaction->t_buffers == NULL);
	J_ASSERT(transaction->t_sync_datalist == NULL);
	J_ASSERT(transaction->t_forget == NULL);
	J_ASSERT(transaction->t_iobuf_list == NULL);
	J_ASSERT(transaction->t_shadow_list == NULL);
	J_ASSERT(transaction->t_log_list == NULL);
	J_ASSERT(transaction->t_checkpoint_list == NULL);
	J_ASSERT(transaction->t_checkpoint_io_list == NULL);
	J_ASSERT(transaction->t_updates == 0);
	J_ASSERT(journal->j_committing_transaction != transaction);
	J_ASSERT(journal->j_running_transaction != transaction);

	trace_jbd_drop_transaction(journal, transaction);
	jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
	kfree(transaction);
}