tsi721_dma.c

/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}
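
/*
 * The dma_device handed to the DMA engine core is embedded in struct
 * rio_mport, and the mport's ->priv points back to the owning tsi721_device;
 * to_tsi721() walks that containment chain to recover the device context.
 */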

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				&sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		  bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;
		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);
		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;
		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);
		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (!bdma_chan->bd_base)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
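
/*
 * Starting a transfer amounts to publishing a new descriptor write count:
 * once DWRCNT moves ahead of the channel's internal read count the BDMA
 * engine begins fetching buffer descriptors from the ring. wr_count_next
 * is accumulated by tsi721_submit_sg() below.
 */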

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
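
/*
 * Build a Type 1 (DTYPE1) buffer descriptor for one SG entry. Note how the
 * RapidIO address is split: its two least significant bits are carried in
 * the bcount word, while the remaining bits (plus the upper address bits
 * from rio_addr_u) form the raddr_lo/raddr_hi pair.
 */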

static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (!bd_ptr)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (!bd_ptr)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}
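
/*
 * Drain the descriptor status FIFO. As the "* 8" indexing below implies,
 * each struct tsi721_dma_sts block is scanned as eight 64-bit completion
 * entries: sts_rdptr counts blocks while the loop clears the individual
 * entries, and the new read pointer is written back to the DSRP register.
 */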

static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
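
/*
 * The hardware ring holds bd_num data descriptors plus one DTYPE3 link
 * descriptor that wraps back to the start, so ring indices are taken
 * modulo (bd_num + 1) and the link slot is skipped. Consecutive SG entries
 * that form one contiguous block are merged into a single descriptor.
 */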

/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			bdma_chan->id, i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}
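
/*
 * Channel tasklet, scheduled from the interrupt path with channel interrupts
 * masked. It handles abort/error conditions (re-initializing the channel and
 * failing the active transaction), warns when the status FIFO overflows, and
 * on completion either finishes the active transaction or resubmits its
 * remaining SG entries, before re-enabling channel interrupts.
 */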

static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;
		struct tsi721_tx_desc *desc;

		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */
		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
		} else {
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);
	tsi721_advance_work(bdma_chan, NULL);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
			GFP_ATOMIC);
	if (!desc) {
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (!bdma_chan->bd_base)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	enum dma_status status;

	spin_lock_bh(&bdma_chan->lock);
	status = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_bh(&bdma_chan->lock);
	return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}
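
/*
 * prep_slave_sg hook. For this driver the opaque context argument is
 * expected to be a struct rio_dma_ext (as passed by the RapidIO DMA
 * helpers), carrying the target destination ID, RapidIO address and
 * write-type hint used to pick the NREAD/NWRITE transaction type.
 */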

static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		tsi_err(&dchan->dev->device, "DMAC%d No SG list",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
		  (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		tsi_err(&dchan->dev->device,
			"DMAC%d Unsupported DMA direction option",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!list_empty(&bdma_chan->free_list)) {
		desc = list_first_entry(&bdma_chan->free_list,
				struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		desc->destid = rext->destid;
		desc->rio_addr = rext->rio_addr;
		desc->rio_addr_u = 0;
		desc->rtype = rtype;
		desc->sg_len = sg_len;
		desc->sg = sgl;
		txd = &desc->txd;
		txd->flags = flags;
	}

	spin_unlock_bh(&bdma_chan->lock);

	if (!txd) {
		tsi_debug(DMA, &dchan->dev->device,
			  "DMAC%d free TXD is not available", bdma_chan->id);
		return ERR_PTR(-EBUSY);
	}

	return txd;
}

static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	while (!tsi721_dma_is_idle(bdma_chan)) {
		udelay(5);
#if (0)
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
	}

	if (bdma_chan->active_tx)
		list_add(&bdma_chan->active_tx->desc_node, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
	if (!bdma_chan->active)
		return;
	spin_lock_bh(&bdma_chan->lock);
	if (!tsi721_dma_is_idle(bdma_chan)) {
		int timeout = 100000;

		/* stop the transfer in progress */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
			udelay(1);
	}

	spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
	int i;

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
			tsi721_dma_stop(&priv->bdma[i]);
	}
}

int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = &priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;

		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		tsi_err(&priv->pdev->dev, "Failed to register DMA device");

	return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
	struct rio_mport *mport = &priv->mport;
	struct dma_chan *chan, *_c;
	struct tsi721_bdma_chan *bdma_chan;

	tsi721_dma_stop_all(priv);
	dma_async_device_unregister(&mport->dma);

	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
				 device_node) {
		bdma_chan = to_tsi721_chan(chan);
		if (bdma_chan->active) {
			tsi721_bdma_interrupt_enable(bdma_chan, 0);
			bdma_chan->active = false;
			tsi721_sync_dma_irq(bdma_chan);
			tasklet_kill(&bdma_chan->tasklet);
			INIT_LIST_HEAD(&bdma_chan->free_list);
			kfree(bdma_chan->tx_desc);
			tsi721_bdma_ch_free(bdma_chan);
		}

		list_del(&chan->device_node);
	}
}