async_tx.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006, Intel Corporation.
 */
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* on architectures without dma-mapping capabilities we need to ensure
 * that the asynchronous path compiles away
 */
#ifdef CONFIG_HAS_DMA
#define __async_inline
#else
#define __async_inline __always_inline
#endif

/**
 * dma_chan_ref - object used to manage dma channels received from the
 * dmaengine core.
 * @chan - the channel being tracked
 * @node - node for the channel to be placed on async_tx_master_list
 * @rcu - for list_del_rcu
 * @count - number of times this channel is listed in the pool
 * (for channels with multiple capabilities)
 */
struct dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
	struct rcu_head rcu;
	atomic_t count;
};

/**
 * async_tx_flags - modifiers for the async_* calls
 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
 * destination address is not a source.  The asynchronous case handles this
 * implicitly, the synchronous case needs to zero the destination block.
 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
 * also one of the source addresses.  In the synchronous case the destination
 * address is an implied source, whereas in the asynchronous case it must be
 * listed as a source.  The destination address must be the first address in
 * the source array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 * dependency chain
 * @ASYNC_TX_FENCE: specify that the next operation in the dependency
 * chain uses this operation's result as an input
 * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
 * input data.  Required for the rmw case.
 */
enum async_tx_flags {
	ASYNC_TX_XOR_ZERO_DST = (1 << 0),
	ASYNC_TX_XOR_DROP_DST = (1 << 1),
	ASYNC_TX_ACK          = (1 << 2),
	ASYNC_TX_FENCE        = (1 << 3),
	ASYNC_TX_PQ_XOR_DST   = (1 << 4),
};

/**
 * struct async_submit_ctl - async_tx submission/completion modifiers
 * @flags: submission modifiers
 * @depend_tx: parent dependency of the current operation being submitted
 * @cb_fn: callback routine to run at operation completion
 * @cb_param: parameter for the callback routine
 * @scribble: caller provided space for dma/page address conversions
 */
struct async_submit_ctl {
	enum async_tx_flags flags;
	struct dma_async_tx_descriptor *depend_tx;
	dma_async_tx_callback cb_fn;
	void *cb_param;
	void *scribble;
};

#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH)
#define async_tx_issue_pending_all dma_issue_pending_all

/**
 * async_tx_issue_pending - send pending descriptor to the hardware channel
 * @tx: descriptor handle to retrieve hardware context
 *
 * Note: any dependent operations will have already been issued by
 * async_tx_channel_switch, or (in the case of no channel switch) will
 * be already pending on this channel.
 */
static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	if (likely(tx)) {
		struct dma_chan *chan = tx->chan;
		struct dma_device *dma = chan->device;

		dma->device_issue_pending(chan);
	}
}

#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
#include <asm/async_tx.h>
#else
#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
	__async_tx_find_channel(dep, type)
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type);
#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
#else
static inline void async_tx_issue_pending_all(void)
{
	do { } while (0);
}

static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	do { } while (0);
}

static inline struct dma_chan *
async_tx_find_channel(struct async_submit_ctl *submit,
		      enum dma_transaction_type tx_type, struct page **dst,
		      int dst_count, struct page **src, int src_count,
		      size_t len)
{
	return NULL;
}
#endif

/**
 * async_tx_sync_epilog - actions to take if an operation is run synchronously
 * @submit: async_tx submission/completion modifiers; the completion callback
 * and its parameter, if any, are taken from here
 */
static inline void
async_tx_sync_epilog(struct async_submit_ctl *submit)
{
	if (submit->cb_fn)
		submit->cb_fn(submit->cb_param);
}
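
/*
 * addr_conv_t - element type for the caller-provided scribble area (see
 * @scribble in struct async_submit_ctl); each element holds either a struct
 * page pointer or the dma address it is converted to.
 */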
typedef union {
	unsigned long addr;
	struct page *page;
	dma_addr_t dma;
} addr_conv_t;

static inline void
init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
		  struct dma_async_tx_descriptor *tx,
		  dma_async_tx_callback cb_fn, void *cb_param,
		  addr_conv_t *scribble)
{
	args->flags = flags;
	args->depend_tx = tx;
	args->cb_fn = cb_fn;
	args->cb_param = cb_param;
	args->scribble = scribble;
}

void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	     unsigned int src_offset, size_t len,
	     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
		   size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
			struct page **ptrs, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
			struct page **ptrs, struct async_submit_ctl *submit);

void async_tx_quiesce(struct dma_async_tx_descriptor **tx);

#endif /* _ASYNC_TX_H_ */
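
Usage sketch (not part of the header above): a minimal example of how a caller might drive an XOR offload through this API, combining init_async_submit(), async_xor(), and async_tx_issue_pending() as declared in the header. The names example_run_xor, example_xor_done, and EXAMPLE_SRC_CNT, the fixed source count, and the use of a struct completion for notification are illustrative assumptions; page allocation, mapping, and error handling are omitted.

#include <linux/async_tx.h>
#include <linux/completion.h>
#include <linux/mm.h>

#define EXAMPLE_SRC_CNT 4	/* assumed number of source pages */

/* completion callback; @param is the cb_param handed to init_async_submit() */
static void example_xor_done(void *param)
{
	complete((struct completion *)param);
}

static void example_run_xor(struct page *dest, struct page **srcs,
			    struct completion *done)
{
	/* scribble space sized here for the xor sources */
	addr_conv_t scribble[EXAMPLE_SRC_CNT];
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/*
	 * ASYNC_TX_XOR_ZERO_DST: dest is not one of the sources, so the
	 * synchronous fallback must zero it first.
	 * ASYNC_TX_ACK: nothing will be chained after this descriptor.
	 */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			  NULL, example_xor_done, done, scribble);
	tx = async_xor(dest, srcs, 0, EXAMPLE_SRC_CNT, PAGE_SIZE, &submit);

	/* kick the channel; a no-op when the operation ran synchronously */
	async_tx_issue_pending(tx);

	/*
	 * Alternatively, async_tx_quiesce(&tx) could be called here to wait
	 * for the descriptor in place instead of completing via the callback.
	 */
}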