/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"

#define DRV_NAME	"dw_dmac"
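
/*
 * Translate a three-cell DT DMA specifier into a channel request. The
 * cells are, in order: the request line (used as both src_id and
 * dst_id), the memory-side master interface and the peripheral-side
 * master interface. A consumer node would reference this controller
 * with something like (hypothetical request line and masters):
 *
 *	dmas = <&dma0 12 0 1>;
 */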
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
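/*
 * Match a candidate channel against an ACPI FixedDMA descriptor. Only
 * the slave ID comes from the descriptor; the memory and peripheral
 * masters are hardwired to 0 and 1, which presumably fits the Intel
 * platforms enumerated via the INTL9C60 ACPI ID below.
 */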
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}
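
/*
 * Register this controller with the ACPI DMA helpers. Failure here is
 * logged but deliberately not fatal to probe: the controller still
 * works, only ACPI-described slave lookups become unavailable.
 */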
static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}

static void dw_dma_acpi_controller_free(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;

	acpi_dma_controller_free(dev);
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

#ifdef CONFIG_OF
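/*
 * Build dw_dma_platform_data from device tree properties. Only
 * "dma-masters" and "dma-channels" are mandatory; the remaining
 * properties are optional, with multi-block capability defaulting to
 * enabled on every channel. Both the preferred "data-width" encoding
 * (bytes) and the legacy "data_width" encoding (log2 of bytes) are
 * accepted. A minimal controller node might look like (hypothetical
 * addresses and phandles):
 *
 *	dma0: dma-controller@fc000000 {
 *		compatible = "snps,dma-spear1340";
 *		reg = <0xfc000000 0x1000>;
 *		interrupts = <12>;
 *		clocks = <&ahb_clk>;
 *		clock-names = "hclk";
 *		dma-masters = <2>;
 *		dma-channels = <8>;
 *		#dma-cells = <3>;
 *	};
 */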
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	/*
	 * All known devices that use DT for configuration support
	 * memory-to-memory transfers, so enable it by default.
	 */
	pdata->is_memcpy = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr,
					       nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
		if (tmp > CHAN_PROTCTL_MASK)
			return NULL;
		pdata->protctl = tmp;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif
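
/*
 * Generic probe sequence: map the registers, force a 32-bit DMA mask,
 * pick up platform data (board file first, DT second), enable the
 * "hclk" interface clock, then hand off to the shared core via
 * dw_dma_probe() before exposing the OF/ACPI channel translators.
 */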
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->id = pdev->id;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}
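
/* Tear everything down in the reverse order of dw_probe(). */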
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_free(chip->dw);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since the DMA device is powered
	 * off. Moreover, we have no way to check whether the platform is
	 * affected or not. That's why we call pm_runtime_get_sync() /
	 * pm_runtime_put() unconditionally. On the other hand, we can't use
	 * pm_runtime_suspended() because the runtime PM framework is not
	 * fully used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

#ifdef CONFIG_PM_SLEEP
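/*
 * System sleep hooks. These are wired up as late-suspend/early-resume
 * callbacks (see dw_dev_pm_ops below) so that client drivers suspend
 * first and have quiesced their channels by the time the controller
 * itself is disabled.
 */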
static int dw_suspend_late(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	return dw_dma_enable(chip);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver		= {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);