of_iommu.c

/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
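
/*
 * Sentinel entry terminating the __iommu_of_table linker section, which is
 * populated with of_device_id entries by IOMMU_OF_DECLARE() users;
 * of_iommu_init() below walks that table.
 */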
static const struct of_device_id __iommu_of_table_sentinel
	__used __section(__iommu_of_table_end);

/**
 * of_get_dma_window - Parse a *dma-window property
 *
 * @dn: device node containing the property
 * @prefix: optional prefix for the property names, or NULL
 * @index: index of the window to parse
 * @busno: returns the bus number if the format includes one; pass NULL if unused
 * @addr: returns the address at which the DMA window starts
 * @size: returns the size of the DMA window
 *
 * Supports several property layouts: the property name may carry a @prefix,
 * and the bus-number cell is optional. Pass 0 (or NULL) for the arguments
 * that are not used.
 *
 * Returns 0 on success, or a negative error code.
 */
int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
		      unsigned long *busno, dma_addr_t *addr, size_t *size)
{
	const __be32 *dma_window, *end;
	int bytes, cur_index = 0;
	char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];

	if (!dn || !addr || !size)
		return -EINVAL;

	if (!prefix)
		prefix = "";

	snprintf(propname, sizeof(propname), "%sdma-window", prefix);
	snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
	snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);

	dma_window = of_get_property(dn, propname, &bytes);
	if (!dma_window)
		return -ENODEV;
	end = dma_window + bytes / sizeof(*dma_window);

	while (dma_window < end) {
		u32 cells;
		const void *prop;

		/* busno is one cell if supported */
		if (busno)
			*busno = be32_to_cpup(dma_window++);

		prop = of_get_property(dn, addrname, NULL);
		if (!prop)
			prop = of_get_property(dn, "#address-cells", NULL);

		cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
		if (!cells)
			return -EINVAL;
		*addr = of_read_number(dma_window, cells);
		dma_window += cells;

		prop = of_get_property(dn, sizename, NULL);
		cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
		if (!cells)
			return -EINVAL;
		*size = of_read_number(dma_window, cells);
		dma_window += cells;

		if (cur_index++ == index)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
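
/*
 * Illustrative sketch only: one layout that the parser above accepts is a
 * "dma-window" property made of an optional bus-number cell followed by
 * address and size cells, e.g. in a hypothetical node (names and values are
 * made up):
 *
 *	dma-controller@10000000 {
 *		#dma-address-cells = <1>;
 *		#dma-size-cells = <1>;
 *		dma-window = <0x0 0x20000000 0x10000000>;
 *	};
 *
 * which a caller could read back with something like:
 *
 *	unsigned long busno;
 *	dma_addr_t addr;
 *	size_t size;
 *
 *	if (!of_get_dma_window(np, NULL, 0, &busno, &addr, &size))
 *		pr_info("window at %pad, size %zu\n", &addr, size);
 */
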
struct of_iommu_node {
	struct list_head list;
	struct device_node *np;
	const struct iommu_ops *ops;
};
static LIST_HEAD(of_iommu_list);
static DEFINE_SPINLOCK(of_iommu_lock);
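
/*
 * of_iommu_set_ops - associate a set of iommu_ops with an IOMMU device node
 *
 * Takes a reference on @np and adds it, together with @ops, to the global
 * of_iommu_list so that of_iommu_get_ops() can look the ops up later when
 * master devices referencing this IOMMU are configured.
 */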
void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
{
	struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

	if (WARN_ON(!iommu))
		return;

	of_node_get(np);
	INIT_LIST_HEAD(&iommu->list);
	iommu->np = np;
	iommu->ops = ops;
	spin_lock(&of_iommu_lock);
	list_add_tail(&iommu->list, &of_iommu_list);
	spin_unlock(&of_iommu_lock);
}
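
/*
 * of_iommu_get_ops - look up the iommu_ops previously registered for @np
 *
 * Returns the ops registered via of_iommu_set_ops() for this device node,
 * or NULL if no IOMMU driver has claimed it.
 */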
const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
	struct of_iommu_node *node;
	const struct iommu_ops *ops = NULL;

	spin_lock(&of_iommu_lock);
	list_for_each_entry(node, &of_iommu_list, list)
		if (node->np == np) {
			ops = node->ops;
			break;
		}
	spin_unlock(&of_iommu_lock);
	return ops;
}
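
/*
 * Sketch of how a driver typically ends up on the list above, assuming the
 * IOMMU_OF_DECLARE() table mechanism of this kernel generation
 * ("hypothetical_smmu" and hypothetical_smmu_init() are made-up names):
 *
 *	static int __init hypothetical_smmu_init(struct device_node *np)
 *	{
 *		// ...probe the hardware described by np...
 *		of_iommu_set_ops(np, &hypothetical_smmu_ops);
 *		return 0;
 *	}
 *	IOMMU_OF_DECLARE(hypothetical_smmu, "vendor,hypothetical-smmu",
 *			 hypothetical_smmu_init);
 *
 * of_iommu_init() below then calls the init function for each matching node.
 */
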
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
	struct of_phandle_args *iommu_spec = data;

	iommu_spec->args[0] = alias;
	return iommu_spec->np == pdev->bus->dev.of_node;
}
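
/*
 * of_pci_iommu_configure - resolve the IOMMU for a PCI device
 *
 * Walks the device's DMA aliases up to the host bridge, then translates the
 * resulting requester ID through the bridge's "iommu-map"/"iommu-map-mask"
 * properties to find the IOMMU node and the ID to hand to it.
 *
 * For illustration only (phandle and values are made up), a host bridge node
 * might carry:
 *
 *	iommu-map = <0x0 &smmu 0x0 0x10000>;
 *
 * mapping RIDs 0x0000-0xffff 1:1 onto the &smmu node's IDs.
 */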
static const struct iommu_ops
*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np)
{
	const struct iommu_ops *ops;
	struct of_phandle_args iommu_spec;

	/*
	 * Start by tracing the RID alias down the PCI topology as
	 * far as the host bridge whose OF node we have...
	 * (we're not even attempting to handle multi-alias devices yet)
	 */
	iommu_spec.args_count = 1;
	iommu_spec.np = bridge_np;
	pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec);

	/*
	 * ...then find out what that becomes once it escapes the PCI
	 * bus into the system beyond, and which IOMMU it ends up at.
	 */
	iommu_spec.np = NULL;
	if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
			   "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
		return NULL;

	ops = of_iommu_get_ops(iommu_spec.np);
	if (!ops || !ops->of_xlate ||
	    iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
	    ops->of_xlate(&pdev->dev, &iommu_spec))
		ops = NULL;

	of_node_put(iommu_spec.np);
	return ops;
}
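
/*
 * of_iommu_configure - set up IOMMU translation for a master device
 *
 * For PCI devices this defers to of_pci_iommu_configure(); for everything
 * else it iterates over the "iommus" phandle list of @master_np, asking the
 * registered IOMMU driver to translate each specifier via ->of_xlate().
 *
 * A typical (illustrative, made-up) master node would reference its IOMMU as:
 *
 *	mmc@fe000000 {
 *		iommus = <&smmu 0x42>;
 *	};
 *
 * where the number of cells after the phandle is given by the IOMMU node's
 * "#iommu-cells" property.
 */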
const struct iommu_ops *of_iommu_configure(struct device *dev,
					   struct device_node *master_np)
{
	struct of_phandle_args iommu_spec;
	struct device_node *np;
	const struct iommu_ops *ops = NULL;
	int idx = 0;

	if (dev_is_pci(dev))
		return of_pci_iommu_configure(to_pci_dev(dev), master_np);

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	while (!of_parse_phandle_with_args(master_np, "iommus",
					   "#iommu-cells", idx,
					   &iommu_spec)) {
		np = iommu_spec.np;
		ops = of_iommu_get_ops(np);

		if (!ops || !ops->of_xlate ||
		    iommu_fwspec_init(dev, &np->fwnode, ops) ||
		    ops->of_xlate(dev, &iommu_spec))
			goto err_put_node;

		of_node_put(np);
		idx++;
	}

	return ops;

err_put_node:
	of_node_put(np);
	return NULL;
}
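
/*
 * of_iommu_init - initialise all IOMMUs described in the device tree
 *
 * Walks the __iommu_of_table built from IOMMU_OF_DECLARE() entries and calls
 * each driver's init function for every matching device-tree node, so that
 * IOMMUs are set up before the devices behind them probe.
 */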
static int __init of_iommu_init(void)
{
	struct device_node *np;
	const struct of_device_id *match, *matches = &__iommu_of_table;

	for_each_matching_node_and_match(np, matches, &match) {
		const of_iommu_init_fn init_fn = match->data;

		if (init_fn(np))
			pr_err("Failed to initialise IOMMU %s\n",
			       of_node_full_name(np));
	}

	return 0;
}
postcore_initcall_sync(of_iommu_init);