sram.c 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247
  1. /*
  2. * Generic on-chip SRAM allocation driver
  3. *
  4. * Copyright (C) 2012 Philipp Zabel, Pengutronix
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version 2
  9. * of the License, or (at your option) any later version.
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
  18. * MA 02110-1301, USA.
  19. */
  20. #include <linux/clk.h>
  21. #include <linux/genalloc.h>
  22. #include <linux/io.h>
  23. #include <linux/list_sort.h>
  24. #include <linux/of_address.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/slab.h>
  27. #define SRAM_GRANULARITY 32
/* Per-device state for one on-chip SRAM instance. */
struct sram_dev {
	struct device *dev;
	void __iomem *virt_base;	/* write-combined mapping of the SRAM */
	struct gen_pool *pool;		/* allocator exposed to SRAM clients */
	struct clk *clk;		/* optional clock; NULL when absent */
};
/*
 * One reserved block parsed from a device-tree child node.
 * start/size are byte offsets relative to the beginning of the
 * SRAM resource (see sram_reserve_regions()), not absolute addresses.
 */
struct sram_reserve {
	struct list_head list;	/* entry in the sorted reserve_list */
	u32 start;
	u32 size;
};
  39. static int sram_reserve_cmp(void *priv, struct list_head *a,
  40. struct list_head *b)
  41. {
  42. struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
  43. struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
  44. return ra->start - rb->start;
  45. }
/*
 * Populate sram->pool with the usable parts of the SRAM region.
 *
 * Device-tree child nodes of the SRAM node describe reserved blocks
 * that must not be handed out by the allocator.  This walks those
 * children, sorts the reserved blocks by offset, and adds every gap
 * between them (and after the last one) to the gen_pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
	struct device_node *np = sram->dev->of_node, *child;
	unsigned long size, cur_start, cur_size;
	struct sram_reserve *rblocks, *block;
	struct list_head reserve_list;
	unsigned int nblocks;
	int ret = 0;

	INIT_LIST_HEAD(&reserve_list);

	size = resource_size(res);

	/*
	 * We need an additional block to mark the end of the memory region
	 * after the reserved blocks from the dt are processed.
	 */
	nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
	rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
	if (!rblocks)
		return -ENOMEM;

	block = &rblocks[0];
	for_each_available_child_of_node(np, child) {
		struct resource child_res;

		ret = of_address_to_resource(child, 0, &child_res);
		if (ret < 0) {
			dev_err(sram->dev,
				"could not get address for node %s\n",
				child->full_name);
			/* drop the reference the iterator holds on 'child' */
			of_node_put(child);
			goto err_chunks;
		}

		/* a reserved block must lie entirely inside the SRAM area */
		if (child_res.start < res->start || child_res.end > res->end) {
			dev_err(sram->dev,
				"reserved block %s outside the sram area\n",
				child->full_name);
			ret = -EINVAL;
			of_node_put(child);
			goto err_chunks;
		}

		/* record offsets relative to the start of the SRAM region */
		block->start = child_res.start - res->start;
		block->size = resource_size(&child_res);
		list_add_tail(&block->list, &reserve_list);

		dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
			block->start, block->start + block->size);

		block++;
	}

	/* the last chunk marks the end of the region */
	rblocks[nblocks - 1].start = size;
	rblocks[nblocks - 1].size = 0;
	list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

	/* sort by offset so the gaps can be walked in a single pass */
	list_sort(NULL, &reserve_list, sram_reserve_cmp);

	cur_start = 0;

	list_for_each_entry(block, &reserve_list, list) {
		/* can only happen if sections overlap */
		if (block->start < cur_start) {
			dev_err(sram->dev,
				"block at 0x%x starts after current offset 0x%lx\n",
				block->start, cur_start);
			ret = -EINVAL;
			goto err_chunks;
		}

		/* current start is in a reserved block, so continue after it */
		if (block->start == cur_start) {
			cur_start = block->start + block->size;
			continue;
		}

		/*
		 * allocate the space between the current starting
		 * address and the following reserved block, or the
		 * end of the region.
		 */
		cur_size = block->start - cur_start;

		dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
			cur_start, cur_start + cur_size);
		ret = gen_pool_add_virt(sram->pool,
				(unsigned long)sram->virt_base + cur_start,
				res->start + cur_start, cur_size, -1);
		if (ret < 0)
			goto err_chunks;

		/* next allocation after this reserved block */
		cur_start = block->start + block->size;
	}

err_chunks:
	kfree(rblocks);

	return ret;
}
  130. static int sram_probe(struct platform_device *pdev)
  131. {
  132. struct sram_dev *sram;
  133. struct resource *res;
  134. size_t size;
  135. int ret;
  136. sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
  137. if (!sram)
  138. return -ENOMEM;
  139. sram->dev = &pdev->dev;
  140. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  141. if (!res) {
  142. dev_err(sram->dev, "found no memory resource\n");
  143. return -EINVAL;
  144. }
  145. size = resource_size(res);
  146. if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
  147. dev_err(sram->dev, "could not request region for resource\n");
  148. return -EBUSY;
  149. }
  150. sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
  151. if (IS_ERR(sram->virt_base))
  152. return PTR_ERR(sram->virt_base);
  153. sram->pool = devm_gen_pool_create(sram->dev,
  154. ilog2(SRAM_GRANULARITY), -1);
  155. if (!sram->pool)
  156. return -ENOMEM;
  157. ret = sram_reserve_regions(sram, res);
  158. if (ret)
  159. return ret;
  160. sram->clk = devm_clk_get(sram->dev, NULL);
  161. if (IS_ERR(sram->clk))
  162. sram->clk = NULL;
  163. else
  164. clk_prepare_enable(sram->clk);
  165. platform_set_drvdata(pdev, sram);
  166. dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
  167. gen_pool_size(sram->pool) / 1024, sram->virt_base);
  168. return 0;
  169. }
  170. static int sram_remove(struct platform_device *pdev)
  171. {
  172. struct sram_dev *sram = platform_get_drvdata(pdev);
  173. if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
  174. dev_err(sram->dev, "removed while SRAM allocated\n");
  175. if (sram->clk)
  176. clk_disable_unprepare(sram->clk);
  177. return 0;
  178. }
#ifdef CONFIG_OF
/* Device-tree match table; of_match_ptr() compiles this out without OF. */
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{}
};
#endif

static struct platform_driver sram_driver = {
	.driver = {
		.name = "sram",
		.of_match_table = of_match_ptr(sram_dt_ids),
	},
	.probe = sram_probe,
	.remove = sram_remove,
};

static int __init sram_init(void)
{
	return platform_driver_register(&sram_driver);
}

/*
 * Registered at postcore level (rather than the usual module/device
 * level) — presumably so SRAM pools exist before later initcalls that
 * allocate from them.
 */
postcore_initcall(sram_init);