
/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR 0x34
#define TIMBDMA_32BIT_ADDR 0x01

#define TIMBDMA_ISR 0x080000
#define TIMBDMA_IPR 0x080004
#define TIMBDMA_IER 0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET 0x40
#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
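
/*
 * A channel's register window follows from its id: channel pair n starts at
 * n * TIMBDMA_INSTANCE_OFFSET, and the TX half of each pair sits a further
 * TIMBDMA_INSTANCE_TX_OFFSET in. td_probe() computes exactly this:
 * membase + (i / 2) * TIMBDMA_INSTANCE_OFFSET, plus the TX offset for the
 * odd (TX) channels.
 */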

/* RX registers, relative the instance base */
#define TIMBDMA_OFFS_RX_DHAR 0x00
#define TIMBDMA_OFFS_RX_DLAR 0x04
#define TIMBDMA_OFFS_RX_LR 0x0C
#define TIMBDMA_OFFS_RX_BLR 0x10
#define TIMBDMA_OFFS_RX_ER 0x14
#define TIMBDMA_RX_EN 0x01
/* bytes per Row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR 0x30

/* TX registers, relative the instance base */
#define TIMBDMA_OFFS_TX_DHAR 0x00
#define TIMBDMA_OFFS_TX_DLAR 0x04
#define TIMBDMA_OFFS_TX_BLR 0x0C
#define TIMBDMA_OFFS_TX_LR 0x14

#define TIMB_DMA_DESC_SIZE 8

struct timb_dma_desc {
        struct list_head desc_node;
        struct dma_async_tx_descriptor txd;
        u8 *desc_list;
        unsigned int desc_list_len;
        bool interrupt;
};

struct timb_dma_chan {
        struct dma_chan chan;
        void __iomem *membase;
        spinlock_t lock; /* Used to protect data structures,
                            especially the lists and descriptors,
                            from races between the tasklet and calls
                            from above */
        bool ongoing;
        struct list_head active_list;
        struct list_head queue;
        struct list_head free_list;
        unsigned int bytes_per_line;
        enum dma_transfer_direction direction;
        unsigned int descs; /* Descriptors to allocate */
        unsigned int desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
        struct dma_device dma;
        void __iomem *membase;
        struct tasklet_struct tasklet;
        struct timb_dma_chan channels[0];
};
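
/*
 * The channels[] array is allocated immediately after struct timb_dma (see
 * the kzalloc() in td_probe()), so a channel pointer can be turned back into
 * its parent struct timb_dma with plain pointer arithmetic; tdchantotd() and
 * __td_dma_done_ack() below rely on this layout.
 */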

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
        return chan2dev(chan)->parent->parent;
}

static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
        int id = td_chan->chan.chan_id;
        return (struct timb_dma *)((u8 *)td_chan -
                id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
        int id = td_chan->chan.chan_id;
        struct timb_dma *td = tdchantotd(td_chan);
        u32 ier;

        /* enable interrupt for this channel */
        ier = ioread32(td->membase + TIMBDMA_IER);
        ier |= 1 << id;
        dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
                ier);
        iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
        int id = td_chan->chan.chan_id;
        struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
                id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
        u32 isr;
        bool done = false;

        dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

        isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
        if (isr) {
                iowrite32(isr, td->membase + TIMBDMA_ISR);
                done = true;
        }

        return done;
}
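
/*
 * Each hardware descriptor element is TIMB_DMA_DESC_SIZE (8) bytes, built by
 * td_fill_desc() below: byte 0 is the control field (0x21, plus 0x02 to mark
 * the last element of the chain), byte 1 is unused, bytes 2-3 carry the
 * transfer length and bytes 4-7 the DMA address, both little-endian.
 */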
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
        struct scatterlist *sg, bool last)
{
        if (sg_dma_len(sg) > USHRT_MAX) {
                dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
                return -EINVAL;
        }

        /* length must be word aligned */
        if (sg_dma_len(sg) % sizeof(u32)) {
                dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
                        sg_dma_len(sg));
                return -EINVAL;
        }

        dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
                dma_desc, (unsigned long long)sg_dma_address(sg));

        dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
        dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
        dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
        dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

        dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
        dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

        dma_desc[1] = 0x00;
        dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

        return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
        struct timb_dma_desc *td_desc;

        if (td_chan->ongoing) {
                dev_err(chan2dev(&td_chan->chan),
                        "Transfer already ongoing\n");
                return;
        }

        td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
                desc_node);

        dev_dbg(chan2dev(&td_chan->chan),
                "td_chan: %p, chan: %d, membase: %p\n",
                td_chan, td_chan->chan.chan_id, td_chan->membase);

        if (td_chan->direction == DMA_DEV_TO_MEM) {
                /* descriptor address */
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
                iowrite32(td_desc->txd.phys, td_chan->membase +
                        TIMBDMA_OFFS_RX_DLAR);
                /* Bytes per line */
                iowrite32(td_chan->bytes_per_line, td_chan->membase +
                        TIMBDMA_OFFS_RX_BPRR);
                /* enable RX */
                iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
        } else {
                /* address high */
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
                iowrite32(td_desc->txd.phys, td_chan->membase +
                        TIMBDMA_OFFS_TX_DLAR);
        }

        td_chan->ongoing = true;

        if (td_desc->interrupt)
                __td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
        struct dmaengine_desc_callback cb;
        struct dma_async_tx_descriptor *txd;
        struct timb_dma_desc *td_desc;

        /* can happen if the descriptor is canceled */
        if (list_empty(&td_chan->active_list))
                return;

        td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
                desc_node);
        txd = &td_desc->txd;

        dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
                txd->cookie);

        /* make sure to stop the transfer */
        if (td_chan->direction == DMA_DEV_TO_MEM)
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
        else
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
        dma_cookie_complete(txd);
        td_chan->ongoing = false;

        dmaengine_desc_get_callback(txd, &cb);

        list_move(&td_desc->desc_node, &td_chan->free_list);

        dma_descriptor_unmap(txd);
        /*
         * The API requires that no submissions are done from a
         * callback, so we don't need to drop the lock here
         */
        dmaengine_desc_callback_invoke(&cb, NULL);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
        int i;
        u32 ret = 0;

        for (i = 0; i < td->dma.chancnt; i++) {
                struct timb_dma_chan *td_chan = td->channels + i;
                if (td_chan->ongoing) {
                        struct timb_dma_desc *td_desc =
                                list_entry(td_chan->active_list.next,
                                        struct timb_dma_desc, desc_node);
                        if (td_desc->interrupt)
                                ret |= 1 << i;
                }
        }

        return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
        struct timb_dma_desc *td_desc;

        BUG_ON(list_empty(&td_chan->queue));
        BUG_ON(td_chan->ongoing);

        td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
                desc_node);

        dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
                __func__, td_desc->txd.cookie);

        list_move(&td_desc->desc_node, &td_chan->active_list);
        __td_start_dma(td_chan);
}

static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
                txd);
        struct timb_dma_chan *td_chan = container_of(txd->chan,
                struct timb_dma_chan, chan);
        dma_cookie_t cookie;

        spin_lock_bh(&td_chan->lock);
        cookie = dma_cookie_assign(txd);

        if (list_empty(&td_chan->active_list)) {
                dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
                        txd->cookie);
                list_add_tail(&td_desc->desc_node, &td_chan->active_list);
                __td_start_dma(td_chan);
        } else {
                dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
                        txd->cookie);

                list_add_tail(&td_desc->desc_node, &td_chan->queue);
        }

        spin_unlock_bh(&td_chan->lock);

        return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
        struct dma_chan *chan = &td_chan->chan;
        struct timb_dma_desc *td_desc;
        int err;

        td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
        if (!td_desc)
                goto out;

        td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

        td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
        if (!td_desc->desc_list)
                goto err;

        dma_async_tx_descriptor_init(&td_desc->txd, chan);
        td_desc->txd.tx_submit = td_tx_submit;
        td_desc->txd.flags = DMA_CTRL_ACK;

        td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
                td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

        err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
        if (err) {
                dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
                goto err;
        }

        return td_desc;
err:
        kfree(td_desc->desc_list);
        kfree(td_desc);
out:
        return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
        dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
        dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
                td_desc->desc_list_len, DMA_TO_DEVICE);

        kfree(td_desc->desc_list);
        kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
        struct timb_dma_desc *td_desc)
{
        dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

        spin_lock_bh(&td_chan->lock);
        list_add(&td_desc->desc_node, &td_chan->free_list);
        spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
        struct timb_dma_desc *td_desc, *_td_desc;
        struct timb_dma_desc *ret = NULL;

        spin_lock_bh(&td_chan->lock);
        list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
                desc_node) {
                if (async_tx_test_ack(&td_desc->txd)) {
                        list_del(&td_desc->desc_node);
                        ret = td_desc;
                        break;
                }
                dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
                        td_desc);
        }

        spin_unlock_bh(&td_chan->lock);

        return ret;
}
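
/*
 * Descriptors are preallocated per channel in td_alloc_chan_resources() and
 * recycled through free_list: td_desc_get() only hands out entries the
 * client has ACKed (async_tx_test_ack()), and descriptors return to the list
 * via td_desc_put() or __td_finish() once they are done with.
 */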

static int td_alloc_chan_resources(struct dma_chan *chan)
{
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
        int i;

        dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

        BUG_ON(!list_empty(&td_chan->free_list));
        for (i = 0; i < td_chan->descs; i++) {
                struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
                if (!td_desc) {
                        if (i)
                                break;
                        else {
                                dev_err(chan2dev(chan),
                                        "Couldn't allocate any descriptors\n");
                                return -ENOMEM;
                        }
                }

                td_desc_put(td_chan, td_desc);
        }

        spin_lock_bh(&td_chan->lock);
        dma_cookie_init(chan);
        spin_unlock_bh(&td_chan->lock);

        return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
        struct timb_dma_desc *td_desc, *_td_desc;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

        /* check that all descriptors are free */
        BUG_ON(!list_empty(&td_chan->active_list));
        BUG_ON(!list_empty(&td_chan->queue));

        spin_lock_bh(&td_chan->lock);
        list_splice_init(&td_chan->free_list, &list);
        spin_unlock_bh(&td_chan->lock);

        list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
                dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
                        td_desc);
                td_free_desc(td_desc);
        }
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        enum dma_status ret;

        dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

        ret = dma_cookie_status(chan, cookie, txstate);

        dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

        return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);

        dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
        spin_lock_bh(&td_chan->lock);

        if (!list_empty(&td_chan->active_list))
                /* transfer ongoing */
                if (__td_dma_done_ack(td_chan))
                        __td_finish(td_chan);

        if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
                __td_start_next(td_chan);

        spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
        struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
        void *context)
{
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
        struct timb_dma_desc *td_desc;
        struct scatterlist *sg;
        unsigned int i;
        unsigned int desc_usage = 0;

        if (!sgl || !sg_len) {
                dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
                return NULL;
        }

        /* even channels are for RX, odd for TX */
        if (td_chan->direction != direction) {
                dev_err(chan2dev(chan),
                        "Requesting channel in wrong direction\n");
                return NULL;
        }

        td_desc = td_desc_get(td_chan);
        if (!td_desc) {
                dev_err(chan2dev(chan), "Not enough descriptors available\n");
                return NULL;
        }

        td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

        for_each_sg(sgl, sg, sg_len, i) {
                int err;
                if (desc_usage > td_desc->desc_list_len) {
                        dev_err(chan2dev(chan), "No descriptor space\n");
                        /* return the descriptor to the pool on failure */
                        td_desc_put(td_chan, td_desc);
                        return NULL;
                }
                err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
                        i == (sg_len - 1));
                if (err) {
                        dev_err(chan2dev(chan), "Failed to update desc: %d\n",
                                err);
                        td_desc_put(td_chan, td_desc);
                        return NULL;
                }
                desc_usage += TIMB_DMA_DESC_SIZE;
        }

        dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
                td_desc->desc_list_len, DMA_TO_DEVICE);

        return &td_desc->txd;
}
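
/*
 * Illustration only (not part of this driver): a minimal sketch of how a
 * client is expected to drive one of these channels through the generic
 * dmaengine API. "buf", "len" and "done_callback" are placeholders; channel
 * request and slave configuration are omitted.
 *
 *        struct scatterlist sg;
 *        struct dma_async_tx_descriptor *txd;
 *
 *        sg_init_one(&sg, buf, len);
 *        dma_map_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE);
 *        txd = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
 *                                      DMA_PREP_INTERRUPT);
 *        txd->callback = done_callback;
 *        dmaengine_submit(txd);           <-- ends up in td_tx_submit()
 *        dma_async_issue_pending(chan);   <-- ends up in td_issue_pending()
 */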

static int td_terminate_all(struct dma_chan *chan)
{
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
        struct timb_dma_desc *td_desc, *_td_desc;

        dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

        /* first the easy part, put the queue into the free list */
        spin_lock_bh(&td_chan->lock);
        list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
                desc_node)
                list_move(&td_desc->desc_node, &td_chan->free_list);

        /* now tear down the running */
        __td_finish(td_chan);
        spin_unlock_bh(&td_chan->lock);

        return 0;
}
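
/*
 * Interrupt handling: td_irq() masks all channel interrupts (IER = 0) and
 * defers to this tasklet, which acks the pending bits in ISR, completes the
 * active descriptor on each signalled channel, starts whatever is queued
 * next, and finally rebuilds IER from __td_ier_mask() so that only channels
 * with an ongoing, interrupt-requesting descriptor stay enabled.
 */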
static void td_tasklet(unsigned long data)
{
        struct timb_dma *td = (struct timb_dma *)data;
        u32 isr;
        u32 ipr;
        u32 ier;
        int i;

        isr = ioread32(td->membase + TIMBDMA_ISR);
        ipr = isr & __td_ier_mask(td);

        /* ack the interrupts */
        iowrite32(ipr, td->membase + TIMBDMA_ISR);

        for (i = 0; i < td->dma.chancnt; i++)
                if (ipr & (1 << i)) {
                        struct timb_dma_chan *td_chan = td->channels + i;
                        spin_lock(&td_chan->lock);
                        __td_finish(td_chan);
                        if (!list_empty(&td_chan->queue))
                                __td_start_next(td_chan);
                        spin_unlock(&td_chan->lock);
                }

        ier = __td_ier_mask(td);
        iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
        struct timb_dma *td = devid;
        u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

        if (ipr) {
                /* disable interrupts, will be re-enabled in tasklet */
                iowrite32(0, td->membase + TIMBDMA_IER);

                tasklet_schedule(&td->tasklet);

                return IRQ_HANDLED;
        } else
                return IRQ_NONE;
}

static int td_probe(struct platform_device *pdev)
{
        struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct timb_dma *td;
        struct resource *iomem;
        int irq;
        int err;
        int i;

        if (!pdata) {
                dev_err(&pdev->dev, "No platform data\n");
                return -EINVAL;
        }

        iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iomem)
                return -EINVAL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        if (!request_mem_region(iomem->start, resource_size(iomem),
                DRIVER_NAME))
                return -EBUSY;

        td = kzalloc(sizeof(struct timb_dma) +
                sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
        if (!td) {
                err = -ENOMEM;
                goto err_release_region;
        }

        dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

        td->membase = ioremap(iomem->start, resource_size(iomem));
        if (!td->membase) {
                dev_err(&pdev->dev, "Failed to remap I/O memory\n");
                err = -ENOMEM;
                goto err_free_mem;
        }

        /* 32bit addressing */
        iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

        /* disable and clear any interrupts */
        iowrite32(0x0, td->membase + TIMBDMA_IER);
        iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

        tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

        err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
        if (err) {
                dev_err(&pdev->dev, "Failed to request IRQ\n");
                goto err_tasklet_kill;
        }

        td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
        td->dma.device_free_chan_resources = td_free_chan_resources;
        td->dma.device_tx_status = td_tx_status;
        td->dma.device_issue_pending = td_issue_pending;

        dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
        td->dma.device_prep_slave_sg = td_prep_slave_sg;
        td->dma.device_terminate_all = td_terminate_all;

        td->dma.dev = &pdev->dev;

        INIT_LIST_HEAD(&td->dma.channels);

        for (i = 0; i < pdata->nr_channels; i++) {
                struct timb_dma_chan *td_chan = &td->channels[i];
                struct timb_dma_platform_data_channel *pchan =
                        pdata->channels + i;

                /* even channels are RX, odd are TX */
                if ((i % 2) == pchan->rx) {
                        dev_err(&pdev->dev, "Wrong channel configuration\n");
                        err = -EINVAL;
                        goto err_free_irq;
                }

                td_chan->chan.device = &td->dma;
                dma_cookie_init(&td_chan->chan);
                spin_lock_init(&td_chan->lock);
                INIT_LIST_HEAD(&td_chan->active_list);
                INIT_LIST_HEAD(&td_chan->queue);
                INIT_LIST_HEAD(&td_chan->free_list);

                td_chan->descs = pchan->descriptors;
                td_chan->desc_elems = pchan->descriptor_elements;
                td_chan->bytes_per_line = pchan->bytes_per_line;
                td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
                        DMA_MEM_TO_DEV;

                td_chan->membase = td->membase +
                        (i / 2) * TIMBDMA_INSTANCE_OFFSET +
                        (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

                dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
                        i, td_chan->membase);

                list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
        }

        err = dma_async_device_register(&td->dma);
        if (err) {
                dev_err(&pdev->dev, "Failed to register async device\n");
                goto err_free_irq;
        }

        platform_set_drvdata(pdev, td);

        dev_dbg(&pdev->dev, "Probe result: %d\n", err);

        return err;

err_free_irq:
        free_irq(irq, td);
err_tasklet_kill:
        tasklet_kill(&td->tasklet);
        iounmap(td->membase);
err_free_mem:
        kfree(td);
err_release_region:
        release_mem_region(iomem->start, resource_size(iomem));

        return err;
}

static int td_remove(struct platform_device *pdev)
{
        struct timb_dma *td = platform_get_drvdata(pdev);
        struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);

        dma_async_device_unregister(&td->dma);
        free_irq(irq, td);
        tasklet_kill(&td->tasklet);
        iounmap(td->membase);
        kfree(td);
        release_mem_region(iomem->start, resource_size(iomem));

        dev_dbg(&pdev->dev, "Removed...\n");
        return 0;
}

static struct platform_driver td_driver = {
        .driver = {
                .name = DRIVER_NAME,
        },
        .probe = td_probe,
        .remove = td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);