/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	 channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	 dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

#define TXX9_DMA_MAX_COUNT 0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT 64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}
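
/*
 * Grab the first ACKed descriptor from the free list, or allocate a new
 * one with GFP_ATOMIC if none is reusable yet.
 */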
static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
					child->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
				desc->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

/* Called with dc->lock held and bh disabled */
static dma_cookie_t
txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
{
	dma_cookie_t cookie = dc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
	mmiowb();
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMACs support SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non-zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non-zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

/*----------------------------------------------------------------------*/
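
/*
 * Retire a completed descriptor: record its cookie, move it and any
 * children on its tx_list back to the free list, unmap the buffers of
 * memcpy transfers, then run the client callback and any dependent
 * transactions.
 */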
static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct txx9dmac_slave *ds = dc->chan.private;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	if (!ds) {
		dma_addr_t dmaaddr;
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			dmaaddr = is_dmac64(dc) ?
				desc->hwdesc.DAR : desc->hwdesc32.DAR;
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			dmaaddr = is_dmac64(dc) ?
				desc->hwdesc.SAR : desc->hwdesc32.SAR;
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
	dma_run_dependencies(txd);
}
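
/*
 * Move descriptors from dc->queue onto @list, linking each hardware
 * descriptor to the next one via its CHAR field and syncing it back to
 * the device.  Stops early after a descriptor that requested a
 * completion interrupt unless the channel has INTENT set.
 */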
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}
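
/*
 * Called with dc->lock held and bh disabled.  Restart the channel with
 * any queued work before completing everything that was on the active
 * list.
 */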
static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}
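
/*
 * Abnormal Chain Completion: drop the offending descriptor from the
 * active list, clear the error bits in CSR, restart the channel with
 * whatever is still pending, and complete the bad descriptor as if it
 * had succeeded, since there is no error reporting path to the client.
 */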
static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}
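
/*
 * Walk the active list and retire every descriptor the hardware has
 * already moved past.  The current position is read from CHAR; a
 * descriptor whose CHAR field matches it is still in flight.  Error
 * completion (ABCHC) and the "all done but channel still active" case
 * are handled here as well.
 */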
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}
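
/*
 * Interrupt handling comes in two flavours: a per-channel IRQ
 * (txx9dmac_chan_interrupt/txx9dmac_chan_tasklet) and one IRQ shared by
 * all channels of a controller (txx9dmac_interrupt/txx9dmac_tasklet).
 * The hard IRQ handlers only disable the IRQ line and schedule the
 * tasklet, which scans the descriptors and re-enables the IRQ.
 */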
static void txx9dmac_chan_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = (struct txx9dmac_chan *)data;
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static void txx9dmac_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = txx9dmac_assign_cookie(dc, desc);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}
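
/*
 * Prepare a memcpy transaction.  The copy is split into chunks of at
 * most TXX9_DMA_MAX_COUNT bytes, each with its own hardware descriptor,
 * chained behind the first one.  Chunk sizes that fall into the range
 * covered by the ERT-TX49H* errata are trimmed slightly (see the
 * workaround below).
 */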
static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit. If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}
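
/*
 * Prepare a slave transaction from a scatterlist.  One hardware
 * descriptor is built per sg entry; SAR/DAR point at the memory buffer
 * and the slave's tx_reg/rx_reg depending on the direction.  The memory
 * side advances by the slave's register width while the register side
 * uses a zero address increment.
 */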
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_TO_DEVICE);
	else
		BUG_ON(direction != DMA_FROM_DEVICE);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_TO_DEVICE) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_TO_DEVICE) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_TO_DEVICE) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}

static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -EINVAL;

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);

	return 0;
}
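
/*
 * If the cookie is not complete yet, scan the descriptors once under the
 * channel lock and check again, so progress made since the last
 * interrupt is taken into account.
 */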
static enum dma_status
txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = dc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&dc->lock);
		txx9dmac_scan_descriptors(dc);
		spin_unlock_bh(&dc->lock);

		last_complete = dc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return ret;
}
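
/*
 * Append the queued descriptors to the chain that is already running
 * (SMPCHN).  If the hardware happened to stop exactly at the old chain
 * tail, restart it by writing the new head to CHAR.
 */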
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	mmiowb();
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}
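
/*
 * Kick the channel: retire finished work, then either start the first
 * queued descriptor on an idle channel or, with SMPCHN, chain the queued
 * descriptors onto the transfer that is already running.
 */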
static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}
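
/*
 * Set up the channel for use: build the base CCR value (memcpy channels
 * use 8-byte transfers, slave channels use the slave's register width
 * and external requests) and pre-allocate an initial pool of
 * descriptors.
 */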
static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dc->completed = chan->cookie = 1;

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
	mmiowb();
}
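
/*
 * The controller is represented by two kinds of platform devices: one
 * "txx9dmac" device for the whole DMAC (register block, master control,
 * optional shared IRQ) and one "txx9dmac-chan" device per channel, which
 * registers the dmaengine channel and requests a per-channel IRQ if the
 * parent device has none.
 */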
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_control = txx9dmac_control;
	dc->dma.device_tx_status = txx9dmac_tx_status;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
				(unsigned long)dc);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dc->chan.cookie = dc->completed = 1;

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}

static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0)
		tasklet_kill(&dc->tasklet);
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
	return 0;
}
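
/*
 * Probe the whole controller: map the register block, pick the
 * descriptor size from the 64-bit/32-bit register layout, force DMA off,
 * optionally install the shared IRQ handler and enable the master
 * (with TXX9_DMA_MCR_FIFUM set for the memcpy channel, if configured).
 */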
static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;
	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;
	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
				(unsigned long)ddev);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);

	return 0;
}

static int __exit txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0)
		tasklet_kill(&ddev->tasklet);
	return 0;
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}

static int txx9dmac_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	return 0;
}

static int txx9dmac_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}

static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
	.suspend_noirq = txx9dmac_suspend_noirq,
	.resume_noirq = txx9dmac_resume_noirq,
};

static struct platform_driver txx9dmac_chan_driver = {
	.remove = __exit_p(txx9dmac_chan_remove),
	.driver = {
		.name = "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove = __exit_p(txx9dmac_remove),
	.shutdown = txx9dmac_shutdown,
	.driver = {
		.name = "txx9dmac",
		.pm = &txx9dmac_dev_pm_ops,
	},
};

static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_ALIAS("platform:txx9dmac");
MODULE_ALIAS("platform:txx9dmac-chan");