xfs_log.h

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it.  We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have
 * to track the space used by the log vectors separately to prevent log space
 * hangs due to inaccurate accounting (i.e. a leak) of the used log space
 * through the CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}
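
/*
 * Example (illustrative): finishing a 13 byte region advances lv_buf_len by
 * round_up(13, sizeof(uint64_t)) = 16 so the next region starts 8 byte
 * aligned, while lv_bytes only grows by the 13 bytes that are actually
 * written to the log:
 *
 *	xlog_finish_iovec(lv, vec, 13);
 *	 - lv->lv_buf_len += 16		(aligned buffer usage)
 *	 - lv->lv_bytes   += 13		(log space charged to the ticket)
 */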
  53. static inline void *
  54. xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
  55. uint type, void *data, int len)
  56. {
  57. void *buf;
  58. buf = xlog_prepare_iovec(lv, vecp, type);
  59. memcpy(buf, data, len);
  60. xlog_finish_iovec(lv, *vecp, len);
  61. return buf;
  62. }
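
/*
 * Illustrative sketch of how a log item formatting routine might use the
 * helpers above.  The names xfs_foo_item_format, struct xfs_foo_log_format
 * and XLOG_REG_TYPE_FOO are hypothetical; only the prepare/copy/finish
 * calling pattern is taken from this header:
 *
 *	static void
 *	xfs_foo_item_format(struct xfs_log_item *lip, struct xfs_log_vec *lv)
 *	{
 *		struct xfs_log_iovec	*vecp = NULL;
 *		struct xfs_foo_log_format foo = { ... };
 *
 *		xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_FOO,
 *				&foo, sizeof(foo));
 *	}
 */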

/*
 * Structure used to pass callback function and the function's argument
 * to the log manager.
 */
typedef struct xfs_log_callback {
	struct xfs_log_callback	*cb_next;
	void			(*cb_func)(void *, int);
	void			*cb_arg;
} xfs_log_callback_t;
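
/*
 * Illustrative sketch (names other than the structure fields and
 * xfs_log_notify() are hypothetical): a caller fills in cb_func/cb_arg and
 * attaches the callback to an in-core log buffer with xfs_log_notify(),
 * declared below; the log manager later invokes cb_func with cb_arg and a
 * status/abort flag:
 *
 *	static void xfs_foo_done(void *arg, int abort) { ... }
 *
 *	cb->cb_func = xfs_foo_done;
 *	cb->cb_arg = foo;
 *	error = xfs_log_notify(iclog, cb);
 */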

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}

#define XFS_LSN_CMP(x,y)	_lsn_cmp(x,y)
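
/*
 * Example (illustrative): XFS_LSN_CMP() returns a negative value, zero, or a
 * positive value, so comparisons read like memcmp(); item_lsn and
 * threshold_lsn are stand-in names:
 *
 *	if (XFS_LSN_CMP(item_lsn, threshold_lsn) <= 0)
 *		... item_lsn is at or before threshold_lsn ...
 */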

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC	0x1
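
/*
 * Example (illustrative): synchronously force the in-core log to disk and
 * wait for the write to complete before returning:
 *
 *	error = xfs_log_force(mp, XFS_LOG_SYNC);
 */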

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;

xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
		       struct xlog_ticket *ticket,
		       struct xlog_in_core **iclog,
		       bool regrant);
int	  xfs_log_force(struct xfs_mount *mp, uint flags);
int	  xfs_log_force_lsn(struct xfs_mount *mp, xfs_lsn_t lsn, uint flags,
			    int *log_forced);
int	  xfs_log_mount(struct xfs_mount *mp,
			struct xfs_buftarg *log_target,
			xfs_daddr_t start_block,
			int num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
int	  xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);
int	  xfs_log_notify(struct xlog_in_core *iclog,
			 struct xfs_log_callback *callback_entry);
int	  xfs_log_release_iclog(struct xfs_mount *mp,
				struct xlog_in_core *iclog);
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int length,
			  int count,
			  struct xlog_ticket **ticket,
			  uint8_t clientid,
			  bool permanent);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	  xfs_log_unmount(struct xfs_mount *mp);
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
			   xfs_lsn_t *commit_lsn, bool regrant);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
void	xfs_log_quiesce(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
bool	xfs_log_in_recovery(struct xfs_mount *);
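
/*
 * Rough, illustrative sketch of how the reservation interfaces above fit
 * together for a transaction commit; the real flow lives in the transaction
 * and CIL code, and resv_bytes/tp are stand-ins for values supplied there:
 *
 *	struct xlog_ticket	*ticket;
 *	xfs_lsn_t		commit_lsn;
 *
 *	error = xfs_log_reserve(mp, resv_bytes, 1, &ticket,
 *				XFS_TRANSACTION, false);
 *	... modify metadata and format log items against the reservation ...
 *	xfs_log_commit_cil(mp, tp, &commit_lsn, false);
 */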

#endif	/* __XFS_LOG_H__ */