/* virt-dma.c */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  10. #include <linux/device.h>
  11. #include <linux/dmaengine.h>
  12. #include <linux/module.h>
  13. #include <linux/spinlock.h>
  14. #include "virt-dma.h"
  15. static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
  16. {
  17. return container_of(tx, struct virt_dma_desc, tx);
  18. }
  19. dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
  20. {
  21. struct virt_dma_chan *vc = to_virt_chan(tx->chan);
  22. struct virt_dma_desc *vd = to_virt_desc(tx);
  23. unsigned long flags;
  24. dma_cookie_t cookie;
  25. spin_lock_irqsave(&vc->lock, flags);
  26. cookie = dma_cookie_assign(tx);
  27. list_move_tail(&vd->node, &vc->desc_submitted);
  28. spin_unlock_irqrestore(&vc->lock, flags);
  29. dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
  30. vc, vd, cookie);
  31. return cookie;
  32. }
  33. EXPORT_SYMBOL_GPL(vchan_tx_submit);
  34. /**
  35. * vchan_tx_desc_free - free a reusable descriptor
  36. * @tx: the transfer
  37. *
  38. * This function frees a previously allocated reusable descriptor. The only
  39. * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
  40. * transfer.
  41. *
  42. * Returns 0 upon success
  43. */
  44. int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
  45. {
  46. struct virt_dma_chan *vc = to_virt_chan(tx->chan);
  47. struct virt_dma_desc *vd = to_virt_desc(tx);
  48. unsigned long flags;
  49. spin_lock_irqsave(&vc->lock, flags);
  50. list_del(&vd->node);
  51. spin_unlock_irqrestore(&vc->lock, flags);
  52. dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
  53. vc, vd, vd->tx.cookie);
  54. vc->desc_free(vd);
  55. return 0;
  56. }
  57. EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
  58. struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
  59. dma_cookie_t cookie)
  60. {
  61. struct virt_dma_desc *vd;
  62. list_for_each_entry(vd, &vc->desc_issued, node)
  63. if (vd->tx.cookie == cookie)
  64. return vd;
  65. return NULL;
  66. }
  67. EXPORT_SYMBOL_GPL(vchan_find_desc);
  68. /*
  69. * This tasklet handles the completion of a DMA descriptor by
  70. * calling its callback and freeing it.
  71. */
  72. static void vchan_complete(unsigned long arg)
  73. {
  74. struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
  75. struct virt_dma_desc *vd;
  76. struct dmaengine_desc_callback cb;
  77. LIST_HEAD(head);
  78. spin_lock_irq(&vc->lock);
  79. list_splice_tail_init(&vc->desc_completed, &head);
  80. vd = vc->cyclic;
  81. if (vd) {
  82. vc->cyclic = NULL;
  83. dmaengine_desc_get_callback(&vd->tx, &cb);
  84. } else {
  85. memset(&cb, 0, sizeof(cb));
  86. }
  87. spin_unlock_irq(&vc->lock);
  88. dmaengine_desc_callback_invoke(&cb, NULL);
  89. while (!list_empty(&head)) {
  90. vd = list_first_entry(&head, struct virt_dma_desc, node);
  91. dmaengine_desc_get_callback(&vd->tx, &cb);
  92. list_del(&vd->node);
  93. if (dmaengine_desc_test_reuse(&vd->tx))
  94. list_add(&vd->node, &vc->desc_allocated);
  95. else
  96. vc->desc_free(vd);
  97. dmaengine_desc_callback_invoke(&cb, NULL);
  98. }
  99. }
  100. void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
  101. {
  102. while (!list_empty(head)) {
  103. struct virt_dma_desc *vd = list_first_entry(head,
  104. struct virt_dma_desc, node);
  105. if (dmaengine_desc_test_reuse(&vd->tx)) {
  106. list_move_tail(&vd->node, &vc->desc_allocated);
  107. } else {
  108. dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
  109. list_del(&vd->node);
  110. vc->desc_free(vd);
  111. }
  112. }
  113. }
  114. EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
  115. void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
  116. {
  117. dma_cookie_init(&vc->chan);
  118. spin_lock_init(&vc->lock);
  119. INIT_LIST_HEAD(&vc->desc_allocated);
  120. INIT_LIST_HEAD(&vc->desc_submitted);
  121. INIT_LIST_HEAD(&vc->desc_issued);
  122. INIT_LIST_HEAD(&vc->desc_completed);
  123. tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
  124. vc->chan.device = dmadev;
  125. list_add_tail(&vc->chan.device_node, &dmadev->channels);
  126. }
  127. EXPORT_SYMBOL_GPL(vchan_init);
  128. MODULE_AUTHOR("Russell King");
  129. MODULE_LICENSE("GPL");