dma-jz4740.c

/*
 * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
 * JZ4740 DMAC support
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/clk.h>

#include "virt-dma.h"

#define JZ_DMA_NR_CHANS 6

#define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20)
#define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20)
#define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20)
#define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20)
#define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20)
#define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20)
#define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20)

#define JZ_REG_DMA_CTRL 0x300
#define JZ_REG_DMA_IRQ 0x304
#define JZ_REG_DMA_DOORBELL 0x308
#define JZ_REG_DMA_DOORBELL_SET 0x30C

#define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31)
#define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6)
#define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4)
#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3)
#define JZ_DMA_STATUS_CTRL_HALT BIT(2)
#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1)
#define JZ_DMA_STATUS_CTRL_ENABLE BIT(0)

#define JZ_DMA_CMD_SRC_INC BIT(23)
#define JZ_DMA_CMD_DST_INC BIT(22)
#define JZ_DMA_CMD_RDIL_MASK (0xf << 16)
#define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14)
#define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12)
#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8)
#define JZ_DMA_CMD_BLOCK_MODE BIT(7)
#define JZ_DMA_CMD_DESC_VALID BIT(4)
#define JZ_DMA_CMD_DESC_VALID_MODE BIT(3)
#define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2)
#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1)
#define JZ_DMA_CMD_LINK_ENABLE BIT(0)

#define JZ_DMA_CMD_FLAGS_OFFSET 22
#define JZ_DMA_CMD_RDIL_OFFSET 16
#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14
#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12
#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8
#define JZ_DMA_CMD_MODE_OFFSET 7

#define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8)
#define JZ_DMA_CTRL_HALT BIT(3)
#define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2)
#define JZ_DMA_CTRL_ENABLE BIT(0)

enum jz4740_dma_width {
        JZ4740_DMA_WIDTH_32BIT = 0,
        JZ4740_DMA_WIDTH_8BIT = 1,
        JZ4740_DMA_WIDTH_16BIT = 2,
};

enum jz4740_dma_transfer_size {
        JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
        JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
        JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
        JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
        JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
};

enum jz4740_dma_flags {
        JZ4740_DMA_SRC_AUTOINC = 0x2,
        JZ4740_DMA_DST_AUTOINC = 0x1,
};

enum jz4740_dma_mode {
        JZ4740_DMA_MODE_SINGLE = 0,
        JZ4740_DMA_MODE_BLOCK = 1,
};
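
/*
 * Software bookkeeping: each prepared transfer is a jz4740_dma_desc, which
 * wraps a virt-dma descriptor and carries one jz4740_dma_sg entry per
 * segment that has to be programmed into the hardware.
 */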
struct jz4740_dma_sg {
        dma_addr_t addr;
        unsigned int len;
};

struct jz4740_dma_desc {
        struct virt_dma_desc vdesc;
        enum dma_transfer_direction direction;
        bool cyclic;
        unsigned int num_sgs;
        struct jz4740_dma_sg sg[];
};

struct jz4740_dmaengine_chan {
        struct virt_dma_chan vchan;
        unsigned int id;
        dma_addr_t fifo_addr;
        unsigned int transfer_shift;
        struct jz4740_dma_desc *desc;
        unsigned int next_sg;
};

struct jz4740_dma_dev {
        struct dma_device ddev;
        void __iomem *base;
        struct clk *clk;
        struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
};

static struct jz4740_dma_dev *jz4740_dma_chan_get_dev(
        struct jz4740_dmaengine_chan *chan)
{
        return container_of(chan->vchan.chan.device, struct jz4740_dma_dev,
                ddev);
}

static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
}

static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct jz4740_dma_desc, vdesc);
}

static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev,
        unsigned int reg)
{
        return readl(dmadev->base + reg);
}

static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev,
        unsigned reg, uint32_t val)
{
        writel(val, dmadev->base + reg);
}

static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev,
        unsigned int reg, uint32_t val, uint32_t mask)
{
        uint32_t tmp;

        tmp = jz4740_dma_read(dmadev, reg);
        tmp &= ~mask;
        tmp |= val;
        jz4740_dma_write(dmadev, reg, tmp);
}

static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
{
        return kzalloc(sizeof(struct jz4740_dma_desc) +
                sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
}
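
/*
 * Map the generic dmaengine slave parameters (bus width, maxburst) onto the
 * controller's transfer-width and transfer-size encodings.
 */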
static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
{
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                return JZ4740_DMA_WIDTH_8BIT;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                return JZ4740_DMA_WIDTH_16BIT;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                return JZ4740_DMA_WIDTH_32BIT;
        default:
                return JZ4740_DMA_WIDTH_32BIT;
        }
}

static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
{
        if (maxburst <= 1)
                return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
        else if (maxburst <= 3)
                return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
        else if (maxburst <= 15)
                return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
        else if (maxburst <= 31)
                return JZ4740_DMA_TRANSFER_SIZE_16BYTE;

        return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
}
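
/*
 * Record the FIFO address and the log2 of the burst size for use by the
 * transfer setup code, then program the channel's command, status and
 * request-type registers from the slave configuration.
 */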
static int jz4740_dma_slave_config(struct dma_chan *c,
        struct dma_slave_config *config)
{
        struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
        struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
        enum jz4740_dma_width src_width;
        enum jz4740_dma_width dst_width;
        enum jz4740_dma_transfer_size transfer_size;
        enum jz4740_dma_flags flags;
        uint32_t cmd;

        switch (config->direction) {
        case DMA_MEM_TO_DEV:
                flags = JZ4740_DMA_SRC_AUTOINC;
                transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
                chan->fifo_addr = config->dst_addr;
                break;
        case DMA_DEV_TO_MEM:
                flags = JZ4740_DMA_DST_AUTOINC;
                transfer_size = jz4740_dma_maxburst(config->src_maxburst);
                chan->fifo_addr = config->src_addr;
                break;
        default:
                return -EINVAL;
        }

        src_width = jz4740_dma_width(config->src_addr_width);
        dst_width = jz4740_dma_width(config->dst_addr_width);

        switch (transfer_size) {
        case JZ4740_DMA_TRANSFER_SIZE_2BYTE:
                chan->transfer_shift = 1;
                break;
        case JZ4740_DMA_TRANSFER_SIZE_4BYTE:
                chan->transfer_shift = 2;
                break;
        case JZ4740_DMA_TRANSFER_SIZE_16BYTE:
                chan->transfer_shift = 4;
                break;
        case JZ4740_DMA_TRANSFER_SIZE_32BYTE:
                chan->transfer_shift = 5;
                break;
        default:
                chan->transfer_shift = 0;
                break;
        }

        cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET;
        cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET;
        cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET;
        cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET;
        cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET;
        cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE;

        jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd);
        jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0);
        jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id),
                config->slave_id);

        return 0;
}
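
/*
 * Disable the channel and hand every queued descriptor back to virt-dma
 * for freeing.
 */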
static int jz4740_dma_terminate_all(struct dma_chan *c)
{
        struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
        struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
                        JZ_DMA_STATUS_CTRL_ENABLE);
        chan->desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&chan->vchan, &head);

        return 0;
}
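
/*
 * Program the next segment of the current descriptor (or fetch the next
 * pending descriptor from the virt-dma queue) into the channel registers
 * and enable the channel. Called with the channel lock held.
 */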
static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
{
        struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
        dma_addr_t src_addr, dst_addr;
        struct virt_dma_desc *vdesc;
        struct jz4740_dma_sg *sg;

        jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
                        JZ_DMA_STATUS_CTRL_ENABLE);

        if (!chan->desc) {
                vdesc = vchan_next_desc(&chan->vchan);
                if (!vdesc)
                        return 0;
                chan->desc = to_jz4740_dma_desc(vdesc);
                chan->next_sg = 0;
        }

        if (chan->next_sg == chan->desc->num_sgs)
                chan->next_sg = 0;

        sg = &chan->desc->sg[chan->next_sg];

        if (chan->desc->direction == DMA_MEM_TO_DEV) {
                src_addr = sg->addr;
                dst_addr = chan->fifo_addr;
        } else {
                src_addr = chan->fifo_addr;
                dst_addr = sg->addr;
        }
        jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr);
        jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr);
        jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id),
                        sg->len >> chan->transfer_shift);

        chan->next_sg++;

        jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id),
                        JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE,
                        JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC |
                        JZ_DMA_STATUS_CTRL_ENABLE);

        jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL,
                        JZ_DMA_CTRL_ENABLE,
                        JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE);

        return 0;
}
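
/*
 * Per-channel interrupt handling: cyclic descriptors get a period callback,
 * non-cyclic descriptors are completed once all of their segments have been
 * transferred, and the next transfer (if any) is started.
 */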
static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
{
        spin_lock(&chan->vchan.lock);
        if (chan->desc) {
                if (chan->desc->cyclic) {
                        vchan_cyclic_callback(&chan->desc->vdesc);
                } else {
                        if (chan->next_sg == chan->desc->num_sgs) {
                                list_del(&chan->desc->vdesc.node);
                                vchan_cookie_complete(&chan->desc->vdesc);
                                chan->desc = NULL;
                        }
                }
        }
        jz4740_dma_start_transfer(chan);
        spin_unlock(&chan->vchan.lock);
}

static irqreturn_t jz4740_dma_irq(int irq, void *devid)
{
        struct jz4740_dma_dev *dmadev = devid;
        uint32_t irq_status;
        unsigned int i;

        irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ);

        for (i = 0; i < JZ_DMA_NR_CHANS; ++i) {
                if (irq_status & (1 << i)) {
                        jz4740_dma_write_mask(dmadev,
                                JZ_REG_DMA_STATUS_CTRL(i), 0,
                                JZ_DMA_STATUS_CTRL_ENABLE |
                                JZ_DMA_STATUS_CTRL_TRANSFER_DONE);

                        jz4740_dma_chan_irq(&dmadev->chan[i]);
                }
        }

        return IRQ_HANDLED;
}

static void jz4740_dma_issue_pending(struct dma_chan *c)
{
        struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
        unsigned long flags;

        spin_lock_irqsave(&chan->vchan.lock, flags);
        if (vchan_issue_pending(&chan->vchan) && !chan->desc)
                jz4740_dma_start_transfer(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
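
/*
 * The channel is programmed one segment at a time, so the prepared descriptor
 * only records the address and length of every scatterlist entry;
 * jz4740_dma_start_transfer() then walks these entries from the interrupt
 * handler.
 */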
static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
        struct dma_chan *c, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
        struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
        struct jz4740_dma_desc *desc;
        struct scatterlist *sg;
        unsigned int i;

        desc = jz4740_dma_alloc_desc(sg_len);
        if (!desc)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                desc->sg[i].addr = sg_dma_address(sg);
                desc->sg[i].len = sg_dma_len(sg);
        }

        desc->num_sgs = sg_len;
        desc->direction = direction;
        desc->cyclic = false;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
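
/*
 * A cyclic transfer is modelled as a descriptor whose segments cover the
 * buffer one period at a time, so the buffer length must be an exact
 * multiple of the period length.
 */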
static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
        struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
        struct jz4740_dma_desc *desc;
        unsigned int num_periods, i;

        if (buf_len % period_len)
                return NULL;

        num_periods = buf_len / period_len;

        desc = jz4740_dma_alloc_desc(num_periods);
        if (!desc)
                return NULL;

        for (i = 0; i < num_periods; i++) {
                desc->sg[i].addr = buf_addr;
                desc->sg[i].len = period_len;
                buf_addr += period_len;
        }

        desc->num_sgs = num_periods;
        desc->direction = direction;
        desc->cyclic = true;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
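
/*
 * The residue is the total length of the segments that have not been started
 * yet, plus whatever the hardware still has to transfer for the segment that
 * is currently in flight (transfer count scaled back up to bytes).
 */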
static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
        struct jz4740_dma_desc *desc, unsigned int next_sg)
{
        struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
        unsigned int residue, count;
        unsigned int i;

        residue = 0;

        for (i = next_sg; i < desc->num_sgs; i++)
                residue += desc->sg[i].len;

        if (next_sg != 0) {
                count = jz4740_dma_read(dmadev,
                        JZ_REG_DMA_TRANSFER_COUNT(chan->id));
                residue += count << chan->transfer_shift;
        }

        return residue;
}
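
/*
 * For the descriptor that is currently being transferred the residue is
 * derived from the channel's segment progress; descriptors that are still
 * queued report their full length.
 */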
static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(c, cookie, state);
        if (status == DMA_COMPLETE || !state)
                return status;

        spin_lock_irqsave(&chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&chan->vchan, cookie);
        if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
                state->residue = jz4740_dma_desc_residue(chan, chan->desc,
                                chan->next_sg);
        } else if (vdesc) {
                state->residue = jz4740_dma_desc_residue(chan,
                                to_jz4740_dma_desc(vdesc), 0);
        } else {
                state->residue = 0;
        }
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        return status;
}

static void jz4740_dma_free_chan_resources(struct dma_chan *c)
{
        vchan_free_chan_resources(to_virt_chan(c));
}

static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
{
        kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
}

#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int jz4740_dma_probe(struct platform_device *pdev)
{
        struct jz4740_dmaengine_chan *chan;
        struct jz4740_dma_dev *dmadev;
        struct dma_device *dd;
        unsigned int i;
        struct resource *res;
        int ret;
        int irq;

        dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
        if (!dmadev)
                return -ENOMEM;

        dd = &dmadev->ddev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dmadev->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dmadev->base))
                return PTR_ERR(dmadev->base);

        dmadev->clk = clk_get(&pdev->dev, "dma");
        if (IS_ERR(dmadev->clk))
                return PTR_ERR(dmadev->clk);

        clk_prepare_enable(dmadev->clk);

        dma_cap_set(DMA_SLAVE, dd->cap_mask);
        dma_cap_set(DMA_CYCLIC, dd->cap_mask);
        dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
        dd->device_tx_status = jz4740_dma_tx_status;
        dd->device_issue_pending = jz4740_dma_issue_pending;
        dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
        dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
        dd->device_config = jz4740_dma_slave_config;
        dd->device_terminate_all = jz4740_dma_terminate_all;
        dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
        dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
        dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        dd->dev = &pdev->dev;
        INIT_LIST_HEAD(&dd->channels);

        for (i = 0; i < JZ_DMA_NR_CHANS; i++) {
                chan = &dmadev->chan[i];
                chan->id = i;
                chan->vchan.desc_free = jz4740_dma_desc_free;
                vchan_init(&chan->vchan, dd);
        }

        ret = dma_async_device_register(dd);
        if (ret)
                goto err_clk;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto err_unregister;
        }

        ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
        if (ret)
                goto err_unregister;

        platform_set_drvdata(pdev, dmadev);

        return 0;

err_unregister:
        dma_async_device_unregister(dd);
err_clk:
        clk_disable_unprepare(dmadev->clk);
        return ret;
}

static void jz4740_cleanup_vchan(struct dma_device *dmadev)
{
        struct jz4740_dmaengine_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                        &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}

static int jz4740_dma_remove(struct platform_device *pdev)
{
        struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
        int irq = platform_get_irq(pdev, 0);

        free_irq(irq, dmadev);

        jz4740_cleanup_vchan(&dmadev->ddev);

        dma_async_device_unregister(&dmadev->ddev);
        clk_disable_unprepare(dmadev->clk);

        return 0;
}

static struct platform_driver jz4740_dma_driver = {
        .probe = jz4740_dma_probe,
        .remove = jz4740_dma_remove,
        .driver = {
                .name = "jz4740-dma",
        },
};
module_platform_driver(jz4740_dma_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("JZ4740 DMA driver");
MODULE_LICENSE("GPL v2");