/*
 * DMA driver for STMicroelectronics STi FDMA controller
 *
 * Copyright (C) 2014 STMicroelectronics
 *
 * Author: Ludovic Barre <Ludovic.barre@st.com>
 *	   Peter Griffin <peter.griffin@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>

#include "st_fdma.h"

static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
	return container_of(c, struct st_fdma_chan, vchan.chan);
}

static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct st_fdma_desc, vdesc);
}

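/*
 * Reserve the request (dreq) line named in the channel config. Returns the
 * line number on success, or -EINVAL if the line is out of range or already
 * in use.
 */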
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;
	u32 req_line_cfg = fchan->cfg.req_line;
	u32 dreq_line;
	int try = 0;

	/*
	 * dreq_mask is shared between the n channels of the FDMA, so all
	 * accesses must be atomic. If dreq_mask changes between ffz and
	 * set_bit, we retry.
	 */
	do {
		if (fdev->dreq_mask == ~0L) {
			dev_err(fdev->dev, "No req lines available\n");
			return -EINVAL;
		}

		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
			dev_err(fdev->dev, "Invalid or used req line\n");
			return -EINVAL;
		} else {
			dreq_line = req_line_cfg;
		}

		try++;
	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
		dreq_line, fdev->dreq_mask);

	return dreq_line;
}

static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;

	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}

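/*
 * Program the channel with the first node of the next queued descriptor
 * and kick off the transfer.
 */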
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
	struct virt_dma_desc *vdesc;
	unsigned long nbytes, ch_cmd, cmd;

	vdesc = vchan_next_desc(&fchan->vchan);
	if (!vdesc)
		return;

	fchan->fdesc = to_st_fdma_desc(vdesc);
	nbytes = fchan->fdesc->node[0].desc->nbytes;
	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

	/* start the channel for the descriptor */
	fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
	fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
	writel(cmd, fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}

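/* Translate the hardware channel status into a dmaengine channel state. */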
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
				  unsigned long int_sta)
{
	unsigned long ch_sta, ch_err;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
	ch_sta &= FDMA_CH_CMD_STA_MASK;

	if (int_sta & FDMA_INT_STA_ERR) {
		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
		fchan->status = DMA_ERROR;
		return;
	}

	switch (ch_sta) {
	case FDMA_CH_CMD_STA_PAUSED:
		fchan->status = DMA_PAUSED;
		break;

	case FDMA_CH_CMD_STA_RUNNING:
		fchan->status = DMA_IN_PROGRESS;
		break;
	}
}

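/*
 * Each channel owns two bits in the interrupt status register (completion
 * and error), so the status word is walked two bits per channel.
 */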
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
	struct st_fdma_dev *fdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	struct st_fdma_chan *fchan = &fdev->chans[0];
	unsigned long int_sta, clr;

	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
	clr = int_sta;

	for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
			continue;

		spin_lock(&fchan->vchan.lock);
		st_fdma_ch_sta_update(fchan, int_sta);

		if (fchan->fdesc) {
			if (!fchan->fdesc->iscyclic) {
				list_del(&fchan->fdesc->vdesc.node);
				vchan_cookie_complete(&fchan->fdesc->vdesc);
				fchan->fdesc = NULL;
				fchan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fchan->fdesc->vdesc);
			}

			/* Start the next descriptor (if available) */
			if (!fchan->fdesc)
				st_fdma_xfer_desc(fchan);
		}

		spin_unlock(&fchan->vchan.lock);
		ret = IRQ_HANDLED;
	}

	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

	return ret;
}

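/*
 * Translate a DT dma-spec into a channel: boot the SLIM core, grab a free
 * channel and, unless the transfer is free-running, reserve its dreq line.
 */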
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct st_fdma_dev *fdev = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct st_fdma_chan *fchan;
	int ret;

	if (dma_spec->args_count < 1)
		return ERR_PTR(-EINVAL);

	if (fdev->dma_device.dev->of_node != dma_spec->np)
		return ERR_PTR(-EINVAL);

	ret = rproc_boot(fdev->slim_rproc->rproc);
	if (ret == -ENOENT)
		return ERR_PTR(-EPROBE_DEFER);
	else if (ret)
		return ERR_PTR(ret);

	chan = dma_get_any_slave_channel(&fdev->dma_device);
	if (!chan)
		goto err_chan;

	fchan = to_st_fdma_chan(chan);

	fchan->cfg.of_node = dma_spec->np;
	fchan->cfg.req_line = dma_spec->args[0];
	fchan->cfg.req_ctrl = 0;
	fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

	if (dma_spec->args_count > 1)
		fchan->cfg.req_ctrl = dma_spec->args[1]
			& FDMA_REQ_CTRL_CFG_MASK;

	if (dma_spec->args_count > 2)
		fchan->cfg.type = dma_spec->args[2];

	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
		fchan->dreq_line = 0;
	} else {
		fchan->dreq_line = st_fdma_dreq_get(fchan);
		if (IS_ERR_VALUE(fchan->dreq_line)) {
			chan = ERR_PTR(fchan->dreq_line);
			goto err_chan;
		}
	}

	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
		fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

	return chan;

err_chan:
	rproc_shutdown(fdev->slim_rproc->rproc);
	return chan;
}

static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = to_st_fdma_desc(vdesc);
	for (i = 0; i < fdesc->n_nodes; i++)
		dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
}

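/*
 * Allocate a software descriptor with one DMA-pool-backed hardware node per
 * scatterlist entry; on any node allocation failure, everything allocated
 * so far is released.
 */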
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
					       int sg_len)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = kzalloc(sizeof(*fdesc) +
			sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
	if (!fdesc)
		return NULL;

	fdesc->fchan = fchan;
	fdesc->n_nodes = sg_len;
	for (i = 0; i < sg_len; i++) {
		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
				GFP_NOWAIT, &fdesc->node[i].pdesc);
		if (!fdesc->node[i].desc)
			goto err;
	}
	return fdesc;

err:
	while (--i >= 0)
		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
	return NULL;
}

static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	/* Create the dma pool for descriptor allocation */
	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
					   fchan->fdev->dev,
					   sizeof(struct st_fdma_hw_node),
					   __alignof__(struct st_fdma_hw_node),
					   0);
	if (!fchan->node_pool) {
		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
		return -ENOMEM;
	}

	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
		fchan->vchan.chan.chan_id, fchan->cfg.type);

	return 0;
}

static void st_fdma_free_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
	unsigned long flags;

	dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
		__func__, fchan->vchan.chan.chan_id);

	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
		st_fdma_dreq_put(fchan);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fchan->fdesc = NULL;
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	dma_pool_destroy(fchan->node_pool);
	fchan->node_pool = NULL;
	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));

	rproc_shutdown(rproc);
}

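/*
 * A memcpy is modelled as a single free-running node with both source and
 * destination addresses incrementing, raising an interrupt at end of node.
 */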
static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;

	if (!len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	/* We only require a single descriptor */
	fdesc = st_fdma_alloc_desc(fchan, 1);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	hw_node = fdesc->node[0].desc;
	hw_node->next = 0;
	hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
	hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
	hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;
	hw_node->nbytes = len;
	hw_node->saddr = src;
	hw_node->daddr = dst;
	hw_node->generic.length = len;
	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

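/*
 * Program the request control register from the dma_slave_config: transfer
 * direction, bus width (load/store opcode) and operations per burst.
 */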
static int config_reqctrl(struct st_fdma_chan *fchan,
			  enum dma_transfer_direction direction)
{
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.src_maxburst;
		width = fchan->scfg.src_addr_width;
		addr = fchan->scfg.src_addr;
		break;

	case DMA_MEM_TO_DEV:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.dst_maxburst;
		width = fchan->scfg.dst_addr_width;
		addr = fchan->scfg.dst_addr;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
		break;

	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
	fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst - 1);
	dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

	fchan->cfg.dev_addr = addr;
	fchan->cfg.dir = direction;

	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
		ch_id, addr, fchan->cfg.req_ctrl);

	return 0;
}

static void fill_hw_node(struct st_fdma_hw_node *hw_node,
			 struct st_fdma_chan *fchan,
			 enum dma_transfer_direction direction)
{
	if (direction == DMA_MEM_TO_DEV) {
		hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
		hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
		hw_node->daddr = fchan->cfg.dev_addr;
	} else {
		hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
		hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
		hw_node->saddr = fchan->cfg.dev_addr;
	}

	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;
}

static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
		size_t len, enum dma_transfer_direction direction)
{
	struct st_fdma_chan *fchan;

	if (!chan || !len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	if (!is_slave_direction(direction)) {
		dev_err(fchan->fdev->dev, "bad direction?\n");
		return NULL;
	}

	return fchan;
}

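/*
 * A cyclic transfer is built as a ring of len/period_len nodes: each node
 * links to the next and the last links back to the first, with an
 * end-of-node interrupt raised per period.
 */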
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	int sg_len, i;

	fchan = st_fdma_prep_common(chan, len, direction);
	if (!fchan)
		return NULL;

	if (!period_len)
		return NULL;

	if (config_reqctrl(fchan, direction)) {
		dev_err(fchan->fdev->dev, "bad width or direction\n");
		return NULL;
	}

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0) {
		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
		return NULL;
	}

	sg_len = len / period_len;
	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = true;

	for (i = 0; i < sg_len; i++) {
		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

		hw_node->control =
			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
		hw_node->control |= FDMA_NODE_CTRL_INT_EON;

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = buf_addr + (i * period_len);
		else
			hw_node->daddr = buf_addr + (i * period_len);

		hw_node->nbytes = period_len;
		hw_node->generic.length = period_len;
	}

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;
	struct scatterlist *sg;
	int i;

	fchan = st_fdma_prep_common(chan, sg_len, direction);
	if (!fchan)
		return NULL;

	if (!sgl)
		return NULL;

	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = false;

	for_each_sg(sgl, sg, sg_len, i) {
		hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
		hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = sg_dma_address(sg);
		else
			hw_node->daddr = sg_dma_address(sg);

		hw_node->nbytes = sg_dma_len(sg);
		hw_node->generic.length = sg_dma_len(sg);
	}

	/* interrupt at end of last node */
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

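/*
 * Walk the descriptor nodes from the last one backwards, summing the bytes
 * of every node the hardware has not reached yet; for the node currently in
 * flight, read the remaining byte count from the hardware instead.
 */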
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
				   struct virt_dma_desc *vdesc,
				   bool in_progress)
{
	struct st_fdma_desc *fdesc = fchan->fdesc;
	size_t residue = 0;
	dma_addr_t cur_addr = 0;
	int i;

	if (in_progress) {
		cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
		cur_addr &= FDMA_CH_CMD_DATA_MASK;
	}

	for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) {
		if (cur_addr == fdesc->node[i].pdesc) {
			residue += fnode_read(fchan, FDMA_CNTN_OFST);
			break;
		}
		residue += fdesc->node[i].desc->nbytes;
	}

	return residue;
}

static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	vd = vchan_find_desc(&fchan->vchan, cookie);
	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
	else if (vd)
		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
	else
		txstate->residue = 0;
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return ret;
}

static void st_fdma_issue_pending(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fchan->vchan.lock, flags);

	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
		st_fdma_xfer_desc(fchan);

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}

static int st_fdma_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc)
		fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

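/*
 * Resume by writing the command word back with its status bits cleared,
 * which restarts the paused channel from where it stopped.
 */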
static int st_fdma_resume(struct dma_chan *chan)
{
	unsigned long flags;
	unsigned long val;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;

	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc) {
		val = fchan_read(fchan, FDMA_CH_CMD_OFST);
		val &= FDMA_CH_CMD_DATA_MASK;
		fchan_write(fchan, val, FDMA_CH_CMD_OFST);
	}
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

static int st_fdma_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	LIST_HEAD(head);
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	fchan->fdesc = NULL;
	vchan_get_all_descriptors(&fchan->vchan, &head);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fchan->vchan, &head);

	return 0;
}

static int st_fdma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *slave_cfg)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
	return 0;
}

static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
	.name = "STiH407",
	.id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
	.name = "STiH407",
	.id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
	.name = "STiH407",
	.id = 2,
};

static const struct of_device_id st_fdma_match[] = {
	{ .compatible = "st,stih407-fdma-mpe31-11",
	  .data = &fdma_mpe31_stih407_11 },
	{ .compatible = "st,stih407-fdma-mpe31-12",
	  .data = &fdma_mpe31_stih407_12 },
	{ .compatible = "st,stih407-fdma-mpe31-13",
	  .data = &fdma_mpe31_stih407_13 },
	{},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);

static int st_fdma_parse_dt(struct platform_device *pdev,
			    const struct st_fdma_driverdata *drvdata,
			    struct st_fdma_dev *fdev)
{
	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
		 drvdata->name, drvdata->id);

	return of_property_read_u32(pdev->dev.of_node, "dma-channels",
				    &fdev->nr_channels);
}

#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void st_fdma_free(struct st_fdma_dev *fdev)
{
	struct st_fdma_chan *fchan;
	int i;

	for (i = 0; i < fdev->nr_channels; i++) {
		fchan = &fdev->chans[i];
		list_del(&fchan->vchan.chan.device_node);
		tasklet_kill(&fchan->vchan.task);
	}
}

static int st_fdma_probe(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev;
	const struct of_device_id *match;
	struct device_node *np = pdev->dev.of_node;
	const struct st_fdma_driverdata *drvdata;
	int ret, i;

	match = of_match_device(st_fdma_match, &pdev->dev);
	if (!match || !match->data) {
		dev_err(&pdev->dev, "No device match found\n");
		return -ENODEV;
	}

	drvdata = match->data;

	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	ret = st_fdma_parse_dt(pdev, drvdata, fdev);
	if (ret) {
		dev_err(&pdev->dev, "unable to find platform data\n");
		goto err;
	}

	fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
				   sizeof(struct st_fdma_chan), GFP_KERNEL);
	if (!fdev->chans)
		return -ENOMEM;

	fdev->dev = &pdev->dev;
	fdev->drvdata = drvdata;
	platform_set_drvdata(pdev, fdev);

	fdev->irq = platform_get_irq(pdev, 0);
	if (fdev->irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq resource\n");
		return -EINVAL;
	}

	ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
			       dev_name(&pdev->dev), fdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
		goto err;
	}

	fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
	if (IS_ERR(fdev->slim_rproc)) {
		ret = PTR_ERR(fdev->slim_rproc);
		dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
		goto err;
	}

	/* Initialise list of FDMA channels */
	INIT_LIST_HEAD(&fdev->dma_device.channels);
	for (i = 0; i < fdev->nr_channels; i++) {
		struct st_fdma_chan *fchan = &fdev->chans[i];

		fchan->fdev = fdev;
		fchan->vchan.desc_free = st_fdma_free_desc;
		vchan_init(&fchan->vchan, &fdev->dma_device);
	}

	/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
	fdev->dreq_mask = BIT(0) | BIT(31);

	dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);

	fdev->dma_device.dev = &pdev->dev;
	fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
	fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
	fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
	fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
	fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
	fdev->dma_device.device_tx_status = st_fdma_tx_status;
	fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
	fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
	fdev->dma_device.device_config = st_fdma_slave_config;
	fdev->dma_device.device_pause = st_fdma_pause;
	fdev->dma_device.device_resume = st_fdma_resume;

	fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dma_async_device_register(&fdev->dma_device);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register DMA device (%d)\n", ret);
		goto err_rproc;
	}

	ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register controller (%d)\n", ret);
		goto err_dma_dev;
	}

	dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);

	return 0;

err_dma_dev:
	dma_async_device_unregister(&fdev->dma_device);
err_rproc:
	st_fdma_free(fdev);
	st_slim_rproc_put(fdev->slim_rproc);
err:
	return ret;
}

static int st_fdma_remove(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

	devm_free_irq(&pdev->dev, fdev->irq, fdev);
	st_slim_rproc_put(fdev->slim_rproc);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&fdev->dma_device);

	return 0;
}

static struct platform_driver st_fdma_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = st_fdma_match,
	},
	.probe = st_fdma_probe,
	.remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic Barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform:" DRIVER_NAME);