stm32-dmamux.c

/*
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * Based on TI DMA Crossbar driver
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define STM32_DMAMUX_CCR(x)		(0x4 * (x))
#define STM32_DMAMUX_MAX_DMA_REQUESTS	32
#define STM32_DMAMUX_MAX_REQUESTS	255
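
/*
 * Register layout note (derived from the code below, not from the reference
 * manual): each DMAMUX output channel x has a 32-bit configuration register
 * at offset 4 * x. A route is programmed by writing the selected request
 * line number into that channel's register; writing 0 releases the route.
 */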
struct stm32_dmamux {
	u32 master;
	u32 request;
	u32 chan_id;
};

struct stm32_dmamux_data {
	struct dma_router dmarouter;
	struct clk *clk;
	struct reset_control *rst;
	void __iomem *iomem;
	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
	spinlock_t lock; /* Protects register access */
	unsigned long *dma_inuse; /* Used DMA channels */
	u32 dma_reqs[]; /* Number of DMA requests per DMA master.
			 * [0] holds the number of DMA masters.
			 * To be kept at the very end of this structure.
			 */
};
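
/*
 * Illustrative dma_reqs[] layout (values assumed, not taken from a real
 * board): with two DMA masters exposing 8 requests each, probe fills
 * dma_reqs[] = { 2, 8, 8 } and dma_requests = 16, so mux channels 0-7 are
 * routed to master 0 and channels 8-15 to master 1.
 */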
static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
{
	return readl_relaxed(iomem + reg);
}

static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
{
	writel_relaxed(val, iomem + reg);
}
static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear dma request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);

	if (!IS_ERR(dmamux->clk))
		clk_disable(dmamux->clk);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}
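
/*
 * stm32_dmamux_route_allocate() translates a consumer's 3-cell dma spec
 * (mux request line plus two cells forwarded to the DMA master) into the
 * 4-cell spec the master expects: cell 0 becomes the allocated channel
 * relative to that master, cell 1 is forced to 0, and the original cells 1
 * and 2 move to positions 2 and 3.
 *
 * Hypothetical client binding (labels and values are illustrative only):
 *
 *	uart4: serial@40004c00 {
 *		...
 *		dmas = <&dmamux1 63 0x400 0x0>, <&dmamux1 64 0x400 0x0>;
 *		dma-names = "rx", "tx";
 *	};
 */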
static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);
	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/* Look for DMA Master */
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;

	/* The of_node_put() will be done in of_dma_router_xlate function */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set dma request */
	spin_lock_irqsave(&dmamux->lock, flags);
	if (!IS_ERR(dmamux->clk)) {
		ret = clk_enable(dmamux->clk);
		if (ret < 0) {
			spin_unlock_irqrestore(&dmamux->lock, flags);
			dev_err(&pdev->dev, "clk_enable issue: %d\n", ret);
			goto error;
		}
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/* craft DMA spec */
	dma_spec->args[3] = dma_spec->args[2];
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}
static const struct of_device_id stm32_stm32dma_master_match[] = {
	{ .compatible = "st,stm32-dma", },
	{},
};
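
/*
 * Probe expects the DMAMUX node to provide a "dma-masters" phandle list
 * (one entry per st,stm32-dma controller behind the mux) and, optionally,
 * a "dma-requests" count of mux inputs (defaults to 255). Each master node
 * may carry its own "dma-requests" count of mux outputs (defaults to 32).
 *
 * Minimal illustrative node (address, size and labels are assumptions):
 *
 *	dmamux1: dma-router@40020800 {
 *		compatible = "st,stm32h7-dmamux";
 *		reg = <0x40020800 0x40>;
 *		#dma-cells = <3>;
 *		dma-requests = <128>;
 *		dma-masters = <&dma1>, <&dma2>;
 *		clocks = <&rcc_clk>;
 *	};
 */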
static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	count = device_property_read_u32_array(&pdev->dev, "dma-masters",
					       NULL, 0);
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk)) {
		ret = PTR_ERR(stm32_dmamux->clk);
		if (ret == -EPROBE_DEFER)
			dev_info(&pdev->dev, "Missing controller clock\n");
		return ret;
	}

	stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(stm32_dmamux->rst)) {
		reset_control_assert(stm32_dmamux->rst);
		udelay(2);
		reset_control_deassert(stm32_dmamux->rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);

	if (!IS_ERR(stm32_dmamux->clk)) {
		ret = clk_prepare_enable(stm32_dmamux->clk);
		if (ret < 0) {
			dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
			return ret;
		}
	}

	/* Reset the dmamux */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	if (!IS_ERR(stm32_dmamux->clk))
		clk_disable(stm32_dmamux->clk);

	return of_dma_router_register(node, stm32_dmamux_route_allocate,
				      &stm32_dmamux->dmarouter);
}
static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};

static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
	},
};

static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");