  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * 8250_dma.c - DMA Engine API support for 8250.c
  4. *
  5. * Copyright (C) 2013 Intel Corporation
  6. */
  7. #include <linux/tty.h>
  8. #include <linux/tty_flip.h>
  9. #include <linux/serial_reg.h>
  10. #include <linux/dma-mapping.h>
  11. #include "8250.h"
/*
 * DMA engine completion callback for a TX transfer.
 *
 * Hands the xmit buffer back to the CPU, consumes the bytes that were
 * just transmitted, and immediately tries to queue the next chunk.  If
 * no further DMA transfer could be started, the THR interrupt is
 * enabled so transmission continues via PIO instead of stalling.
 */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	/* Give the just-transmitted region back to the CPU before touching it. */
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	/* Advance the circular-buffer tail (UART_XMIT_SIZE is a power of two). */
	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/* Try to queue the next (possibly wrapped) chunk of the buffer. */
	ret = serial8250_tx_dma(p);
	if (ret)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}
/*
 * Push the bytes received by a completed (or paused) RX DMA transfer
 * into the tty flip buffer.
 *
 * Called both as the dmaengine completion callback and directly from
 * serial8250_rx_dma_flush() after pausing the channel; the residue
 * reported by dmaengine_tx_status() tells how much of rx_buf was NOT
 * filled, so count = rx_size - residue.
 *
 * NOTE(review): no port lock is taken here and rx_running is cleared
 * without synchronization — confirm callers cannot race a concurrent
 * flush against the callback.
 */
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	int count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}
/*
 * Start a TX DMA transfer for the data pending in the xmit circular
 * buffer.
 *
 * Only the contiguous run up to the end of the buffer is sent per
 * transfer (CIRC_CNT_TO_END); __dma_tx_complete() re-invokes this
 * function to transmit the wrapped remainder.
 *
 * Returns 0 when a transfer was started, one is already running, or
 * there is nothing to send; a negative error code if descriptor
 * preparation failed (tx_err is set so the caller falls back to PIO).
 * Caller is expected to hold the port lock.
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor *desc;
	int ret;

	if (dma->tx_running)
		return 0;

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	/* Largest contiguous chunk; wrap is handled on the next invocation. */
	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	/* Mark running before submit so the callback sees consistent state. */
	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Hand the xmit buffer to the device before kicking the channel. */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);

	/* A previous PIO fallback is no longer needed; disable THRI. */
	if (dma->tx_err) {
		dma->tx_err = 0;
		serial8250_clear_THRI(p);
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
  85. int serial8250_rx_dma(struct uart_8250_port *p)
  86. {
  87. struct uart_8250_dma *dma = p->dma;
  88. struct dma_async_tx_descriptor *desc;
  89. if (dma->rx_running)
  90. return 0;
  91. desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
  92. dma->rx_size, DMA_DEV_TO_MEM,
  93. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  94. if (!desc)
  95. return -EBUSY;
  96. dma->rx_running = 1;
  97. desc->callback = __dma_rx_complete;
  98. desc->callback_param = p;
  99. dma->rx_cookie = dmaengine_submit(desc);
  100. dma_async_issue_pending(dma->rxchan);
  101. return 0;
  102. }
  103. void serial8250_rx_dma_flush(struct uart_8250_port *p)
  104. {
  105. struct uart_8250_dma *dma = p->dma;
  106. if (dma->rx_running) {
  107. dmaengine_pause(dma->rxchan);
  108. __dma_rx_complete(p);
  109. dmaengine_terminate_async(dma->rxchan);
  110. }
  111. }
  112. EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
/*
 * Acquire and configure the RX and TX DMA channels and buffers for a
 * port.
 *
 * RX requires pause/terminate support and byte-granular residue
 * reporting from the dmaengine driver; TX only requires terminate.
 * On success, rx_buf is a coherent allocation and the port's xmit
 * buffer is streaming-mapped for TX.  On failure, everything acquired
 * so far is released (goto-based unwind) and a negative errno is
 * returned.
 *
 * NOTE(review): the return values of dmaengine_slave_config() are
 * ignored for both channels — confirm that is acceptable for the
 * dmaengine drivers this can pair with.
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	/* Platform code may override the FIFO address; default to mapbase. */
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		/* Undo the RX allocation inline; 'err' only drops channels. */
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
/*
 * Release everything serial8250_request_dma() acquired.
 *
 * For each direction: stop the channel first (terminate_sync waits for
 * callbacks to finish), then free/unmap its buffer, then release the
 * channel itself.  Safe to call on a port without DMA (p->dma == NULL).
 */
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);