/* spi-fsl-cpm.c */
  1. /*
  2. * Freescale SPI controller driver cpm functions.
  3. *
  4. * Maintainer: Kumar Gala
  5. *
  6. * Copyright (C) 2006 Polycom, Inc.
  7. * Copyright 2010 Freescale Semiconductor, Inc.
  8. *
  9. * CPM SPI and QE buffer descriptors mode support:
  10. * Copyright (c) 2009 MontaVista Software, Inc.
  11. * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
  12. *
  13. * This program is free software; you can redistribute it and/or modify it
  14. * under the terms of the GNU General Public License as published by the
  15. * Free Software Foundation; either version 2 of the License, or (at your
  16. * option) any later version.
  17. */
  18. #include <asm/cpm.h>
  19. #include <soc/fsl/qe/qe.h>
  20. #include <linux/dma-mapping.h>
  21. #include <linux/fsl_devices.h>
  22. #include <linux/kernel.h>
  23. #include <linux/module.h>
  24. #include <linux/of_address.h>
  25. #include <linux/spi/spi.h>
  26. #include <linux/types.h>
  27. #include <linux/platform_device.h>
  28. #include "spi-fsl-cpm.h"
  29. #include "spi-fsl-lib.h"
  30. #include "spi-fsl-spi.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

/* SPI event/mask register bits */
#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */

/* SPCOM register values */
#define SPCOM_STR (1 << 23) /* Start transmit */

#define SPI_PRAM_SIZE 0x100
#define SPI_MRBLR ((unsigned int)PAGE_SIZE) /* max bytes per rx buffer / chunk */

/*
 * Shared scratch buffer used as the rx target when a transfer has no
 * rx_buf. It is shared across all controller instances, refcounted,
 * and protected by fsl_dummy_rx_lock.
 */
static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;
  48. void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
  49. {
  50. if (mspi->flags & SPI_QE) {
  51. qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
  52. QE_CR_PROTOCOL_UNSPECIFIED, 0);
  53. } else {
  54. if (mspi->flags & SPI_CPM1) {
  55. out_be32(&mspi->pram->rstate, 0);
  56. out_be16(&mspi->pram->rbptr,
  57. in_be16(&mspi->pram->rbase));
  58. out_be32(&mspi->pram->tstate, 0);
  59. out_be16(&mspi->pram->tbptr,
  60. in_be16(&mspi->pram->tbase));
  61. } else {
  62. cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
  63. }
  64. }
  65. }
  66. EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
/*
 * Program the rx and tx buffer descriptors for the next chunk of the
 * in-progress transfer (at most SPI_MRBLR bytes) and kick the CPM to
 * start it. The rx BD is armed before the tx BD, and the start command
 * is written last.
 */
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
	struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
	struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
	unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
	unsigned int xfer_ofs;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	/* Offset of this chunk within the whole transfer. */
	xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

	/* The shared dummy rx buffer is reused for every chunk: no offset. */
	if (mspi->rx_dma == mspi->dma_dummy_rx)
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
	else
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
	out_be16(&rx_bd->cbd_datlen, 0);
	out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

	/* Likewise for the dummy tx buffer. */
	if (mspi->tx_dma == mspi->dma_dummy_tx)
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
	else
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
	out_be16(&tx_bd->cbd_datlen, xfer_len);
	out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
		 BD_SC_LAST);

	/* start transfer */
	mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}
/*
 * Set up DMA for one spi_transfer and start its first chunk.
 *
 * @mspi: controller state
 * @t: the transfer to run
 * @is_dma_mapped: caller already provided t->tx_dma / t->rx_dma
 *
 * Directions without a buffer are serviced by the shared dummy buffers.
 * map_tx_dma/map_rx_dma record which mappings this function created, so
 * that fsl_spi_cpm_bufs_complete() only unmaps what was mapped here.
 *
 * Returns 0 on success, -ENOMEM if a DMA mapping fails (any mapping
 * made before the failure is undone).
 */
int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
		     struct spi_transfer *t, bool is_dma_mapped)
{
	struct device *dev = mspi->dev;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	if (is_dma_mapped) {
		mspi->map_tx_dma = 0;
		mspi->map_rx_dma = 0;
	} else {
		mspi->map_tx_dma = 1;
		mspi->map_rx_dma = 1;
	}

	/* Missing buffers fall back to the dummy buffers (already mapped). */
	if (!t->tx_buf) {
		mspi->tx_dma = mspi->dma_dummy_tx;
		mspi->map_tx_dma = 0;
	}
	if (!t->rx_buf) {
		mspi->rx_dma = mspi->dma_dummy_rx;
		mspi->map_rx_dma = 0;
	}
	if (mspi->map_tx_dma) {
		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mspi->tx_dma)) {
			dev_err(dev, "unable to map tx dma\n");
			return -ENOMEM;
		}
	} else if (t->tx_buf) {
		/* Caller-provided mapping. */
		mspi->tx_dma = t->tx_dma;
	}
	if (mspi->map_rx_dma) {
		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mspi->rx_dma)) {
			dev_err(dev, "unable to map rx dma\n");
			goto err_rx_dma;
		}
	} else if (t->rx_buf) {
		/* Caller-provided mapping. */
		mspi->rx_dma = t->rx_dma;
	}

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);

	mspi->xfer_in_progress = t;
	mspi->count = t->len;

	/* start CPM transfers */
	fsl_spi_cpm_bufs_start(mspi);

	return 0;

err_rx_dma:
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
  145. void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
  146. {
  147. struct device *dev = mspi->dev;
  148. struct spi_transfer *t = mspi->xfer_in_progress;
  149. if (mspi->map_tx_dma)
  150. dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
  151. if (mspi->map_rx_dma)
  152. dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
  153. mspi->xfer_in_progress = NULL;
  154. }
  155. EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
  156. void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
  157. {
  158. u16 len;
  159. struct fsl_spi_reg *reg_base = mspi->reg_base;
  160. dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
  161. in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
  162. len = in_be16(&mspi->rx_bd->cbd_datlen);
  163. if (len > mspi->count) {
  164. WARN_ON(1);
  165. len = mspi->count;
  166. }
  167. /* Clear the events */
  168. mpc8xxx_spi_write_reg(&reg_base->event, events);
  169. mspi->count -= len;
  170. if (mspi->count)
  171. fsl_spi_cpm_bufs_start(mspi);
  172. else
  173. complete(&mspi->done);
  174. }
  175. EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
  176. static void *fsl_spi_alloc_dummy_rx(void)
  177. {
  178. mutex_lock(&fsl_dummy_rx_lock);
  179. if (!fsl_dummy_rx)
  180. fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
  181. if (fsl_dummy_rx)
  182. fsl_dummy_rx_refcnt++;
  183. mutex_unlock(&fsl_dummy_rx_lock);
  184. return fsl_dummy_rx;
  185. }
  186. static void fsl_spi_free_dummy_rx(void)
  187. {
  188. mutex_lock(&fsl_dummy_rx_lock);
  189. switch (fsl_dummy_rx_refcnt) {
  190. case 0:
  191. WARN_ON(1);
  192. break;
  193. case 1:
  194. kfree(fsl_dummy_rx);
  195. fsl_dummy_rx = NULL;
  196. /* fall through */
  197. default:
  198. fsl_dummy_rx_refcnt--;
  199. break;
  200. }
  201. mutex_unlock(&fsl_dummy_rx_lock);
  202. }
  203. static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
  204. {
  205. struct device *dev = mspi->dev;
  206. struct device_node *np = dev->of_node;
  207. const u32 *iprop;
  208. int size;
  209. void __iomem *spi_base;
  210. unsigned long pram_ofs = -ENOMEM;
  211. /* Can't use of_address_to_resource(), QE muram isn't at 0. */
  212. iprop = of_get_property(np, "reg", &size);
  213. /* QE with a fixed pram location? */
  214. if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
  215. return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
  216. /* QE but with a dynamic pram location? */
  217. if (mspi->flags & SPI_QE) {
  218. pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
  219. qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
  220. QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
  221. return pram_ofs;
  222. }
  223. spi_base = of_iomap(np, 1);
  224. if (spi_base == NULL)
  225. return -EINVAL;
  226. if (mspi->flags & SPI_CPM2) {
  227. pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
  228. out_be16(spi_base, pram_ofs);
  229. }
  230. iounmap(spi_base);
  231. return pram_ofs;
  232. }
/*
 * One-time setup of the CPM/QE SPI resources: parameter ram, the
 * tx/rx buffer descriptor pair in muram, and the dummy DMA buffers.
 *
 * Returns 0 on success (or immediately if the controller is not in a
 * CPM mode), -ENOMEM on failure with everything acquired so far
 * released via the goto unwind chain at the bottom.
 */
int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	unsigned long bds_ofs;

	if (!(mspi->flags & SPI_CPM_MODE))
		return 0;

	if (!fsl_spi_alloc_dummy_rx())
		return -ENOMEM;

	if (mspi->flags & SPI_QE) {
		/* Map the device-tree cell-index onto a QE SPI subblock. */
		iprop = of_get_property(np, "cell-index", &size);
		if (iprop && size == sizeof(*iprop))
			mspi->subblock = *iprop;

		switch (mspi->subblock) {
		default:
			dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
			/* fall through */
		case 0:
			mspi->subblock = QE_CR_SUBBLOCK_SPI1;
			break;
		case 1:
			mspi->subblock = QE_CR_SUBBLOCK_SPI2;
			break;
		}
	}

	if (mspi->flags & SPI_CPM1) {
		/* On CPM1 the parameter ram is a plain MMIO resource. */
		struct resource *res;
		void *pram;

		res = platform_get_resource(to_platform_device(dev),
					    IORESOURCE_MEM, 1);
		pram = devm_ioremap_resource(dev, res);
		if (IS_ERR(pram))
			mspi->pram = NULL;
		else
			mspi->pram = pram;
	} else {
		/* On CPM2/QE the parameter ram lives in muram. */
		unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);

		if (IS_ERR_VALUE(pram_ofs))
			mspi->pram = NULL;
		else
			mspi->pram = cpm_muram_addr(pram_ofs);
	}
	if (mspi->pram == NULL) {
		dev_err(dev, "can't allocate spi parameter ram\n");
		goto err_pram;
	}

	/* One tx and one rx buffer descriptor, allocated back to back. */
	bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
				  sizeof(*mspi->rx_bd), 8);
	if (IS_ERR_VALUE(bds_ofs)) {
		dev_err(dev, "can't allocate bds\n");
		goto err_bds;
	}

	/* Dummy tx source: the kernel's permanently-zero page. */
	mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
		dev_err(dev, "unable to map dummy tx buffer\n");
		goto err_dummy_tx;
	}

	/* Dummy rx sink: the shared refcounted scratch buffer. */
	mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
		dev_err(dev, "unable to map dummy rx buffer\n");
		goto err_dummy_rx;
	}

	mspi->tx_bd = cpm_muram_addr(bds_ofs);
	mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

	/* Initialize parameter ram. */
	out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
	out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
	out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
	out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
	out_be16(&mspi->pram->mrblr, SPI_MRBLR);
	out_be32(&mspi->pram->rstate, 0);
	out_be32(&mspi->pram->rdp, 0);
	out_be16(&mspi->pram->rbptr, 0);
	out_be16(&mspi->pram->rbc, 0);
	out_be32(&mspi->pram->rxtmp, 0);
	out_be32(&mspi->pram->tstate, 0);
	out_be32(&mspi->pram->tdp, 0);
	out_be16(&mspi->pram->tbptr, 0);
	out_be16(&mspi->pram->tbc, 0);
	out_be32(&mspi->pram->txtmp, 0);

	return 0;

err_dummy_rx:
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
	cpm_muram_free(bds_ofs);
err_bds:
	/* CPM1 pram is devm-managed MMIO, not muram: nothing to free here. */
	if (!(mspi->flags & SPI_CPM1))
		cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
	fsl_spi_free_dummy_rx();
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
  330. void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
  331. {
  332. struct device *dev = mspi->dev;
  333. if (!(mspi->flags & SPI_CPM_MODE))
  334. return;
  335. dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
  336. dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
  337. cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
  338. cpm_muram_free(cpm_muram_offset(mspi->pram));
  339. fsl_spi_free_dummy_rx();
  340. }
  341. EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
  342. MODULE_LICENSE("GPL");