spi-dw-mid.c

/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define RX_BUSY		0
#define TX_BUSY		1

static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };

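/*
 * Filter callback for dma_request_channel(): accept only channels that
 * belong to the DMA controller located in mid_spi_dma_init() and attach
 * the dw_dma_slave configuration to the channel.
 */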
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct pci_dev *dma_dev;
	struct dw_dma_slave *tx = dws->dma_tx;
	struct dw_dma_slave *rx = dws->dma_rx;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently this can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;
	dws->master->dma_rx = dws->rxchan;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;
	dws->master->dma_tx = dws->txchan;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

	dmaengine_terminate_sync(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_sync(dws->rxchan);
	dma_release_channel(dws->rxchan);
}

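/*
 * Interrupt handler installed while a DMA transfer is in flight: the only
 * unmasked interrupts are FIFO overrun/underrun errors, so on any status
 * the chip is reset and the current message is finished with -EIO.
 */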
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
	return IRQ_HANDLED;
}

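/*
 * DMA is only used when a transfer does not fit entirely in the controller
 * FIFO; shorter transfers are handled by PIO.
 */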
static bool mid_spi_can_dma(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (!dws->dma_inited)
		return false;

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth convert_dma_width(u32 dma_width)
{
	if (dma_width == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (dma_width == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the TX channel clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->dma_width);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the RX channel clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->dma_width);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

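/*
 * The DMA request trigger levels below pair with the burst size of 16
 * programmed in the slave configs above: DMARDLR = 0xf raises an RX request
 * once 16 entries sit in the RX FIFO, and DMATDLR = 0x10 raises a TX request
 * once the TX FIFO drops to 16 entries or fewer.
 */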
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Unmask the TX overflow and RX underflow/overflow error interrupts */
	spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);

	dws->transfer_handler = dma_transfer;

	return 0;
}

static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * RX must be started before TX: as soon as TX begins clocking data
	 * out, the RX FIFO starts filling and would overflow without a
	 * ready RX channel.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}

static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}

	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}

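/* DMA callbacks hooked into the DW SPI core when CONFIG_SPI_DW_MID_DMA is set */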
static const struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};
#endif

/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, 32b reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_tx = &mid_dma_tx;
	dws->dma_rx = &mid_dma_rx;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}
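
/*
 * Illustrative usage (a minimal sketch, not part of this file): a PCI glue
 * driver such as spi-dw-pci.c is expected to call dw_spi_mid_init() from its
 * probe routine before registering the controller, so that max_freq and the
 * DMA ops are in place, e.g.:
 *
 *	ret = dw_spi_mid_init(dws);		/∗ after dws->bus_num is set ∗/
 *	if (ret)
 *		return ret;
 *	ret = dw_spi_add_host(&pdev->dev, dws);
 */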