altera_sgdma.c

/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                                struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                u16 length,
                                int generate_eop,
                                int rfixed,
                                int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

int sgdma_initialize(struct altera_tse_private *priv)
{
        priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
                          SGDMA_CTRLREG_INTEN;

        priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
                          SGDMA_CTRLREG_INTEN |
                          SGDMA_CTRLREG_ILASTD;

        priv->sgdmadesclen = sizeof(struct sgdma_descrip);

        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);

        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;

        priv->rxdescphys = dma_map_single(priv->device,
                                          (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping rx descriptor memory\n");
                return -EINVAL;
        }

        priv->txdescphys = dma_map_single(priv->device,
                                          (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);

        if (dma_mapping_error(priv->device, priv->txdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping tx descriptor memory\n");
                return -EINVAL;
        }

        /* Initialize descriptor memory to all 0's, sync memory to cache */
        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);

        dma_sync_single_for_device(priv->device, priv->rxdescphys,
                                   priv->rxdescmem, DMA_TO_DEVICE);

        return 0;
}
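
/* Editor's note on the mapping directions above (inferred from this
 * file, not from the original comments): the RX descriptor memory is
 * mapped DMA_BIDIRECTIONAL because the CPU writes descriptors for the
 * hardware to fetch and the hardware writes status and bytes_xferred
 * back for the CPU to read, while the TX descriptor memory only needs
 * DMA_TO_DEVICE since completion state is read back through csrrd8()
 * on the descriptor control field instead. Each CPU-side read of RX
 * descriptor state is bracketed by the matching sync call, as in
 * sgdma_rx_status() below:
 *
 *        dma_sync_single_for_cpu(priv->device, priv->rxdescphys,
 *                                priv->sgdmadesclen, DMA_FROM_DEVICE);
 *        pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
 */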

void sgdma_uninitialize(struct altera_tse_private *priv)
{
        if (priv->rxdescphys)
                dma_unmap_single(priv->device, priv->rxdescphys,
                                 priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (priv->txdescphys)
                dma_unmap_single(priv->device, priv->txdescphys,
                                 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
        /* Initialize descriptor memory to 0 */
        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

        csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

        csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}
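
/* Editor's note: the reset above is pulsed, not latched. Writing
 * SGDMA_CTRLREG_RESET asserts the soft reset and the immediately
 * following write of 0 releases it, which also leaves every control
 * bit (including the interrupt enables) cleared until the next kick
 * rewrites the control register with the saved txctrlreg/rxctrlreg
 * plus SGDMA_CTRLREG_START.
 */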

/* For SGDMA, interrupts remain enabled after initially enabling,
 * so no need to provide implementations for abstract enable
 * and disable
 */
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
        tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
                    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
        tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
                    SGDMA_CTRLREG_CLRINT);
}

/* Transmits a buffer through the SGDMA. Returns the number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        struct sgdma_descrip __iomem *descbase =
                (struct sgdma_descrip __iomem *)priv->tx_dma_desc;

        struct sgdma_descrip __iomem *cdesc = &descbase[0];
        struct sgdma_descrip __iomem *ndesc = &descbase[1];

        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
                return 0;

        sgdma_setup_descrip(cdesc,                      /* current descriptor */
                            ndesc,                      /* next descriptor */
                            sgdma_txphysaddr(priv, ndesc),
                            buffer->dma_addr,           /* address of packet to xmit */
                            0,                          /* write addr 0 for tx dma */
                            buffer->len,                /* length of packet */
                            SGDMA_CONTROL_EOP,          /* generate EOP */
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* fixed write address */

        sgdma_async_write(priv, cdesc);

        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);

        return 1;
}
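
/* Editor's note: a minimal sketch of the intended calling pattern,
 * with tx_lock held as documented above. The caller shape (dev, flags,
 * the decision to stop the queue) is an assumption modeled on the TSE
 * main driver, not part of this file:
 *
 *        spin_lock_irqsave(&priv->tx_lock, flags);
 *        if (sgdma_tx_buffer(priv, buffer) == 0)
 *                netif_stop_queue(dev);   - engine busy, retry later
 *        spin_unlock_irqrestore(&priv->tx_lock, flags);
 *
 * Completed transmits are later reaped via sgdma_tx_completions().
 */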

/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
        u32 ready = 0;

        if (!sgdma_txbusy(priv) &&
            ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
             & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }

        return ready;
}
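
/* Editor's note: a transmit is counted as complete only when all three
 * conditions above hold at once: the engine is no longer busy, the
 * hardware has released ownership of the first descriptor (the
 * SGDMA_CONTROL_HW_OWNED bit is clear), and there was actually a
 * pending buffer on the tx list to dequeue.
 */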

void sgdma_start_rxdma(struct altera_tse_private *priv)
{
        sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
                       struct tse_buffer *rxbuffer)
{
        queue_rx(priv, rxbuffer);
}

/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
        struct sgdma_descrip __iomem *base =
                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
        struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
        unsigned int rxstatus = 0;

        u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
                unsigned int pktlength = 0;
                unsigned int pktstatus = 0;

                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        priv->sgdmadesclen,
                                        DMA_FROM_DEVICE);

                pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
                pktstatus = csrrd8(desc, sgdma_descroffs(status));
                rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);

                if (rxstatus) {
                        csrwr8(0, desc, sgdma_descroffs(status));

                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
                                netdev_info(priv->dev,
                                            "sgdma rx and rx queue empty!\n");

                        /* Clear control */
                        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
                        csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

                        /* kick the rx sgdma after reaping this descriptor */
                        sgdma_async_read(priv);

                } else {
                        /* If the SGDMA indicated an end of packet on recv,
                         * then it's expected that the rxstatus from the
                         * descriptor is non-zero - meaning a valid packet
                         * with a nonzero length, or an error has been
                         * indicated. If not, then all we can do is signal
                         * an error and return no packet received. Most likely
                         * there is a system design error, or an error in the
                         * underlying kernel (cache or cache management problem)
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
                                   sts, csrrd8(desc, sgdma_descroffs(status)),
                                   rxstatus);
                }
        } else if (sts == 0) {
                sgdma_async_read(priv);
        }

        return rxstatus;
}
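
/* Editor's note: given the packing documented above, a caller splits
 * the return value as status = rxstatus >> 16 and length =
 * rxstatus & 0xffff; a clean 64-byte frame, for example, yields
 * 0x00000040 (status 0, length 64), and 0 means no packet. Sketch:
 *
 *        u32 rxstatus = sgdma_rx_status(priv);
 *        u16 pktstatus = rxstatus >> 16;
 *        u16 pktlength = rxstatus & 0xffff;
 */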

/* Private functions */
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                                struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                u16 length,
                                int generate_eop,
                                int rfixed,
                                int wfixed)
{
        /* Clear the next descriptor as not owned by hardware */
        u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));

        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
        csrwr8(ctrl, ndesc, sgdma_descroffs(control));

        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;

        /* Channel is implicitly zero, initialized to 0 by default */
        csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
        csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

        csrwr32(0, desc, sgdma_descroffs(pad1));
        csrwr32(0, desc, sgdma_descroffs(pad2));
        csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

        csrwr8(ctrl, desc, sgdma_descroffs(control));
        csrwr8(0, desc, sgdma_descroffs(status));
        csrwr8(0, desc, sgdma_descroffs(wburst));
        csrwr8(0, desc, sgdma_descroffs(rburst));
        csrwr16(length, desc, sgdma_descroffs(bytes));
        csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
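
/* Editor's note (an inference from the callers, not stated in the
 * original source): both the tx and rx paths chain exactly two
 * descriptors, &descbase[0] as the current one and &descbase[1] as the
 * next. Setting HW_OWNED on the current descriptor while clearing it
 * on the next appears to act as an end-of-chain sentinel, so each kick
 * of the engine processes exactly one descriptor and then stops.
 */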

/* If hardware is busy, don't restart async read.
 * If the status register is 0 - meaning initial state, restart async read,
 * probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and a status, restart the async
 * DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
        struct sgdma_descrip __iomem *descbase =
                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;

        struct sgdma_descrip __iomem *cdesc = &descbase[0];
        struct sgdma_descrip __iomem *ndesc = &descbase[1];
        struct tse_buffer *rxbuffer = NULL;

        if (!sgdma_rxbusy(priv)) {
                rxbuffer = queue_rx_peekhead(priv);
                if (rxbuffer == NULL) {
                        netdev_err(priv->dev, "no rx buffers available\n");
                        return 0;
                }

                sgdma_setup_descrip(cdesc,              /* current descriptor */
                                    ndesc,              /* next descriptor */
                                    sgdma_rxphysaddr(priv, ndesc),
                                    0,                  /* read addr 0 for rx dma */
                                    rxbuffer->dma_addr, /* write addr for rx dma */
                                    0,                  /* read 'til EOP */
                                    0,                  /* EOP: NA for rx dma */
                                    0,                  /* read fixed: NA for rx dma */
                                    0);                 /* write fixed: NA for rx dma */

                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
                                           priv->sgdmadesclen,
                                           DMA_TO_DEVICE);

                csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
                        priv->rx_dma_csr,
                        sgdma_csroffs(next_descrip));

                csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
                        priv->rx_dma_csr,
                        sgdma_csroffs(control));

                return 1;
        }

        return 0;
}
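
/* Editor's note: sgdma_async_read() above and sgdma_async_write()
 * below kick the engine with the same CSR sequence: sync the freshly
 * written descriptor out to device-visible memory, program the
 * descriptor's bus address into next_descrip, then write the saved
 * interrupt enables plus SGDMA_CTRLREG_START into control.
 */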

static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip __iomem *desc)
{
        if (sgdma_txbusy(priv))
                return 0;

        /* clear control and status */
        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
        csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->sgdmadesclen, DMA_TO_DEVICE);

        csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
                priv->tx_dma_csr,
                sgdma_csroffs(next_descrip));

        csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
                priv->tx_dma_csr,
                sgdma_csroffs(control));

        return 1;
}

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc)
{
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc)
{
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}
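
/* Editor's note: the two helpers above translate a CPU-side descriptor
 * pointer into the bus address the SGDMA engine dereferences, by
 * carrying the pointer's byte offset within the descriptor region over
 * to the region's bus base address. For example, &descbase[1] sits
 * sizeof(struct sgdma_descrip) bytes past tx_dma_desc, so
 * sgdma_txphysaddr() returns
 * txdescmem_busaddr + sizeof(struct sgdma_descrip).
 */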

#define list_remove_head(list, entry, type, member)                     \
        do {                                                            \
                entry = NULL;                                           \
                if (!list_empty(list)) {                                \
                        entry = list_entry((list)->next, type, member); \
                        list_del_init(&entry->member);                  \
                }                                                       \
        } while (0)

#define list_peek_head(list, entry, type, member)                       \
        do {                                                            \
                entry = NULL;                                           \
                if (!list_empty(list)) {                                \
                        entry = list_entry((list)->next, type, member); \
                }                                                       \
        } while (0)
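
/* Editor's note: for dequeue_tx() below, list_remove_head() expands to
 * roughly the following (sketch of the macro expansion):
 *
 *        buffer = NULL;
 *        if (!list_empty(&priv->txlisthd)) {
 *                buffer = list_entry(priv->txlisthd.next,
 *                                    struct tse_buffer, lh);
 *                list_del_init(&buffer->lh);
 *        }
 *
 * list_peek_head() is identical minus the list_del_init(), which is
 * what lets queue_rx_peekhead() examine the head without consuming it.
 */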

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->txlisthd);
}

/* adds a tse_buffer to the tail of an rx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, or returns
 * NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, or returns
 * NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* returns the head of the receive buffer list without removing it,
 * or NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* check and return rx sgdma status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
        return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
                       & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation; returns 0
 * when it transitions to not busy, or 1 if the wait times out.
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
        int delay = 0;

        /* if DMA is busy, wait for current transaction to finish */
        while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
                & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);

        if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
            & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
        return 0;
}
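
/* Editor's note: the bounded poll above spins for at most 100
 * iterations of udelay(1), i.e. roughly 100 microseconds, before
 * declaring a timeout; unlike sgdma_rxbusy(), which samples the busy
 * bit exactly once and never waits.
 */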